path: root/io_uring
Diffstat (limited to 'io_uring')
-rw-r--r--  io_uring/Kconfig         |    11
-rw-r--r--  io_uring/Makefile        |    25
-rw-r--r--  io_uring/advise.c        |    49
-rw-r--r--  io_uring/alloc_cache.c   |    44
-rw-r--r--  io_uring/alloc_cache.h   |    65
-rw-r--r--  io_uring/cancel.c        |   424
-rw-r--r--  io_uring/cancel.h        |    32
-rw-r--r--  io_uring/cmd_net.c       |   188
-rw-r--r--  io_uring/epoll.c         |    41
-rw-r--r--  io_uring/epoll.h         |     2
-rw-r--r--  io_uring/eventfd.c       |   168
-rw-r--r--  io_uring/eventfd.h       |     7
-rw-r--r--  io_uring/fdinfo.c        |   250
-rw-r--r--  io_uring/filetable.c     |    99
-rw-r--r--  io_uring/filetable.h     |    41
-rw-r--r--  io_uring/fs.c            |    34
-rw-r--r--  io_uring/futex.c         |   334
-rw-r--r--  io_uring/futex.h         |    37
-rw-r--r--  io_uring/io-wq.c         |   890
-rw-r--r--  io_uring/io-wq.h         |    15
-rw-r--r--  io_uring/io_uring.c      |  4030
-rw-r--r--  io_uring/io_uring.h      |   555
-rw-r--r--  io_uring/kbuf.c          |   780
-rw-r--r--  io_uring/kbuf.h          |   145
-rw-r--r--  io_uring/memmap.c        |   396
-rw-r--r--  io_uring/memmap.h        |    51
-rw-r--r--  io_uring/mock_file.c     |   350
-rw-r--r--  io_uring/msg_ring.c      |   217
-rw-r--r--  io_uring/msg_ring.h      |     1
-rw-r--r--  io_uring/napi.c          |   396
-rw-r--r--  io_uring/napi.h          |    88
-rw-r--r--  io_uring/net.c           |  1661
-rw-r--r--  io_uring/net.h           |    52
-rw-r--r--  io_uring/nop.c           |    76
-rw-r--r--  io_uring/notif.c         |   123
-rw-r--r--  io_uring/notif.h         |    14
-rw-r--r--  io_uring/opdef.c         |   557
-rw-r--r--  io_uring/opdef.h         |    37
-rw-r--r--  io_uring/openclose.c     |   216
-rw-r--r--  io_uring/openclose.h     |     6
-rw-r--r--  io_uring/poll.c          |   443
-rw-r--r--  io_uring/poll.h          |    24
-rw-r--r--  io_uring/query.c         |   136
-rw-r--r--  io_uring/query.h         |     9
-rw-r--r--  io_uring/refs.h          |    14
-rw-r--r--  io_uring/register.c      |   927
-rw-r--r--  io_uring/register.h      |     9
-rw-r--r--  io_uring/rsrc.c          |  1773
-rw-r--r--  io_uring/rsrc.h          |   205
-rw-r--r--  io_uring/rw.c            |  1271
-rw-r--r--  io_uring/rw.h            |    48
-rw-r--r--  io_uring/slist.h         |    45
-rw-r--r--  io_uring/splice.c        |    63
-rw-r--r--  io_uring/splice.h        |     1
-rw-r--r--  io_uring/sqpoll.c        |   234
-rw-r--r--  io_uring/sqpoll.h        |    13
-rw-r--r--  io_uring/statx.c         |    10
-rw-r--r--  io_uring/sync.c          |    20
-rw-r--r--  io_uring/tctx.c          |    49
-rw-r--r--  io_uring/timeout.c       |   221
-rw-r--r--  io_uring/timeout.h       |    15
-rw-r--r--  io_uring/truncate.c      |    48
-rw-r--r--  io_uring/truncate.h      |     4
-rw-r--r--  io_uring/uring_cmd.c     |   372
-rw-r--r--  io_uring/uring_cmd.h     |    31
-rw-r--r--  io_uring/waitid.c        |   348
-rw-r--r--  io_uring/waitid.h        |    15
-rw-r--r--  io_uring/xattr.c         |   119
-rw-r--r--  io_uring/zcrx.c          |  1485
-rw-r--r--  io_uring/zcrx.h          |   109
70 files changed, 14103 insertions(+), 6465 deletions(-)
diff --git a/io_uring/Kconfig b/io_uring/Kconfig
new file mode 100644
index 000000000000..4b949c42c0bf
--- /dev/null
+++ b/io_uring/Kconfig
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# io_uring configuration
+#
+
+config IO_URING_ZCRX
+ def_bool y
+ depends on IO_URING
+ depends on PAGE_POOL
+ depends on INET
+ depends on NET_RX_BUSY_POLL
diff --git a/io_uring/Makefile b/io_uring/Makefile
index 8cc8e5387a75..bc4e4a3fa0a5 100644
--- a/io_uring/Makefile
+++ b/io_uring/Makefile
@@ -2,10 +2,23 @@
#
# Makefile for io_uring
-obj-$(CONFIG_IO_URING) += io_uring.o xattr.o nop.o fs.o splice.o \
- sync.o advise.o filetable.o \
- openclose.o uring_cmd.o epoll.o \
- statx.o net.o msg_ring.o timeout.o \
- sqpoll.o fdinfo.o tctx.o poll.o \
- cancel.o kbuf.o rsrc.o rw.o opdef.o notif.o
+ifdef CONFIG_GCOV_PROFILE_URING
+GCOV_PROFILE := y
+endif
+
+obj-$(CONFIG_IO_URING) += io_uring.o opdef.o kbuf.o rsrc.o notif.o \
+ tctx.o filetable.o rw.o poll.o \
+ eventfd.o uring_cmd.o openclose.o \
+ sqpoll.o xattr.o nop.o fs.o splice.o \
+ sync.o msg_ring.o advise.o openclose.o \
+ statx.o timeout.o cancel.o \
+ waitid.o register.o truncate.o \
+ memmap.o alloc_cache.o query.o
+obj-$(CONFIG_IO_URING_ZCRX) += zcrx.o
obj-$(CONFIG_IO_WQ) += io-wq.o
+obj-$(CONFIG_FUTEX) += futex.o
+obj-$(CONFIG_EPOLL) += epoll.o
+obj-$(CONFIG_NET_RX_BUSY_POLL) += napi.o
+obj-$(CONFIG_NET) += net.o cmd_net.o
+obj-$(CONFIG_PROC_FS) += fdinfo.o
+obj-$(CONFIG_IO_URING_MOCK_FILE) += mock_file.o
diff --git a/io_uring/advise.c b/io_uring/advise.c
index 449c6f14649f..0073f74e3658 100644
--- a/io_uring/advise.c
+++ b/io_uring/advise.c
@@ -17,14 +17,14 @@
struct io_fadvise {
struct file *file;
u64 offset;
- u32 len;
+ u64 len;
u32 advice;
};
struct io_madvise {
struct file *file;
u64 addr;
- u32 len;
+ u64 len;
u32 advice;
};
@@ -33,12 +33,15 @@ int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
struct io_madvise *ma = io_kiocb_to_cmd(req, struct io_madvise);
- if (sqe->buf_index || sqe->off || sqe->splice_fd_in)
+ if (sqe->buf_index || sqe->splice_fd_in)
return -EINVAL;
ma->addr = READ_ONCE(sqe->addr);
- ma->len = READ_ONCE(sqe->len);
+ ma->len = READ_ONCE(sqe->off);
+ if (!ma->len)
+ ma->len = READ_ONCE(sqe->len);
ma->advice = READ_ONCE(sqe->fadvise_advice);
+ req->flags |= REQ_F_FORCE_ASYNC;
return 0;
#else
return -EOPNOTSUPP;
@@ -51,27 +54,42 @@ int io_madvise(struct io_kiocb *req, unsigned int issue_flags)
struct io_madvise *ma = io_kiocb_to_cmd(req, struct io_madvise);
int ret;
- if (issue_flags & IO_URING_F_NONBLOCK)
- return -EAGAIN;
+ WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
ret = do_madvise(current->mm, ma->addr, ma->len, ma->advice);
io_req_set_res(req, ret, 0);
- return IOU_OK;
+ return IOU_COMPLETE;
#else
return -EOPNOTSUPP;
#endif
}
+static bool io_fadvise_force_async(struct io_fadvise *fa)
+{
+ switch (fa->advice) {
+ case POSIX_FADV_NORMAL:
+ case POSIX_FADV_RANDOM:
+ case POSIX_FADV_SEQUENTIAL:
+ return false;
+ default:
+ return true;
+ }
+}
+
int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_fadvise *fa = io_kiocb_to_cmd(req, struct io_fadvise);
- if (sqe->buf_index || sqe->addr || sqe->splice_fd_in)
+ if (sqe->buf_index || sqe->splice_fd_in)
return -EINVAL;
fa->offset = READ_ONCE(sqe->off);
- fa->len = READ_ONCE(sqe->len);
+ fa->len = READ_ONCE(sqe->addr);
+ if (!fa->len)
+ fa->len = READ_ONCE(sqe->len);
fa->advice = READ_ONCE(sqe->fadvise_advice);
+ if (io_fadvise_force_async(fa))
+ req->flags |= REQ_F_FORCE_ASYNC;
return 0;
}
@@ -80,20 +98,11 @@ int io_fadvise(struct io_kiocb *req, unsigned int issue_flags)
struct io_fadvise *fa = io_kiocb_to_cmd(req, struct io_fadvise);
int ret;
- if (issue_flags & IO_URING_F_NONBLOCK) {
- switch (fa->advice) {
- case POSIX_FADV_NORMAL:
- case POSIX_FADV_RANDOM:
- case POSIX_FADV_SEQUENTIAL:
- break;
- default:
- return -EAGAIN;
- }
- }
+ WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK && io_fadvise_force_async(fa));
ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
if (ret < 0)
req_set_fail(req);
io_req_set_res(req, ret, 0);
- return IOU_OK;
+ return IOU_COMPLETE;
}
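The reworked io_fadvise_prep()/io_madvise_prep() above accept a 64-bit length through sqe->addr (fadvise) or sqe->off (madvise), falling back to sqe->len when that field is zero. A minimal userspace sketch, not part of this patch, assuming liburing for ring setup and filling a raw SQE so the 64-bit field is visible (fadvise_large is a made-up helper name):

#include <liburing.h>
#include <string.h>
#include <fcntl.h>

static int fadvise_large(struct io_uring *ring, int fd, __u64 off, __u64 len)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int ret;

	if (!sqe)
		return -EBUSY;
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_FADVISE;
	sqe->fd = fd;
	sqe->off = off;
	/* 64-bit length goes in ->addr; ->len remains a 32-bit fallback */
	sqe->addr = len;
	sqe->fadvise_advice = POSIX_FADV_DONTNEED;

	io_uring_submit(ring);
	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret)
		return ret;
	ret = cqe->res;
	io_uring_cqe_seen(ring, cqe);
	return ret;
}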
diff --git a/io_uring/alloc_cache.c b/io_uring/alloc_cache.c
new file mode 100644
index 000000000000..58423888b736
--- /dev/null
+++ b/io_uring/alloc_cache.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "alloc_cache.h"
+
+void io_alloc_cache_free(struct io_alloc_cache *cache,
+ void (*free)(const void *))
+{
+ void *entry;
+
+ if (!cache->entries)
+ return;
+
+ while ((entry = io_alloc_cache_get(cache)) != NULL)
+ free(entry);
+
+ kvfree(cache->entries);
+ cache->entries = NULL;
+}
+
+/* returns false if the cache was initialized properly */
+bool io_alloc_cache_init(struct io_alloc_cache *cache,
+ unsigned max_nr, unsigned int size,
+ unsigned int init_bytes)
+{
+ cache->entries = kvmalloc_array(max_nr, sizeof(void *), GFP_KERNEL);
+ if (!cache->entries)
+ return true;
+
+ cache->nr_cached = 0;
+ cache->max_cached = max_nr;
+ cache->elem_size = size;
+ cache->init_clear = init_bytes;
+ return false;
+}
+
+void *io_cache_alloc_new(struct io_alloc_cache *cache, gfp_t gfp)
+{
+ void *obj;
+
+ obj = kmalloc(cache->elem_size, gfp);
+ if (obj && cache->init_clear)
+ memset(obj, 0, cache->init_clear);
+ return obj;
+}
diff --git a/io_uring/alloc_cache.h b/io_uring/alloc_cache.h
index 729793ae9712..d33ce159ef33 100644
--- a/io_uring/alloc_cache.h
+++ b/io_uring/alloc_cache.h
@@ -1,53 +1,68 @@
#ifndef IOU_ALLOC_CACHE_H
#define IOU_ALLOC_CACHE_H
+#include <linux/io_uring_types.h>
+
/*
* Don't allow the cache to grow beyond this size.
*/
-#define IO_ALLOC_CACHE_MAX 512
+#define IO_ALLOC_CACHE_MAX 128
+
+void io_alloc_cache_free(struct io_alloc_cache *cache,
+ void (*free)(const void *));
+bool io_alloc_cache_init(struct io_alloc_cache *cache,
+ unsigned max_nr, unsigned int size,
+ unsigned int init_bytes);
-struct io_cache_entry {
- struct hlist_node node;
-};
+void *io_cache_alloc_new(struct io_alloc_cache *cache, gfp_t gfp);
static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
- struct io_cache_entry *entry)
+ void *entry)
{
- if (cache->nr_cached < IO_ALLOC_CACHE_MAX) {
- cache->nr_cached++;
- hlist_add_head(&entry->node, &cache->list);
+ if (cache->nr_cached < cache->max_cached) {
+ if (!kasan_mempool_poison_object(entry))
+ return false;
+ cache->entries[cache->nr_cached++] = entry;
return true;
}
return false;
}
-static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *cache)
+static inline void *io_alloc_cache_get(struct io_alloc_cache *cache)
{
- if (!hlist_empty(&cache->list)) {
- struct hlist_node *node = cache->list.first;
+ if (cache->nr_cached) {
+ void *entry = cache->entries[--cache->nr_cached];
- hlist_del(node);
- return container_of(node, struct io_cache_entry, node);
+ /*
+ * If KASAN is enabled, always clear the initial bytes that
+ * must be zeroed post alloc, in case any of them overlap
+ * with KASAN storage.
+ */
+#if defined(CONFIG_KASAN)
+ kasan_mempool_unpoison_object(entry, cache->elem_size);
+ if (cache->init_clear)
+ memset(entry, 0, cache->init_clear);
+#endif
+ return entry;
}
return NULL;
}
-static inline void io_alloc_cache_init(struct io_alloc_cache *cache)
+static inline void *io_cache_alloc(struct io_alloc_cache *cache, gfp_t gfp)
{
- INIT_HLIST_HEAD(&cache->list);
- cache->nr_cached = 0;
+ void *obj;
+
+ obj = io_alloc_cache_get(cache);
+ if (obj)
+ return obj;
+ return io_cache_alloc_new(cache, gfp);
}
-static inline void io_alloc_cache_free(struct io_alloc_cache *cache,
- void (*free)(struct io_cache_entry *))
+static inline void io_cache_free(struct io_alloc_cache *cache, void *obj)
{
- while (!hlist_empty(&cache->list)) {
- struct hlist_node *node = cache->list.first;
-
- hlist_del(node);
- free(container_of(node, struct io_cache_entry, node));
- }
- cache->nr_cached = 0;
+ if (!io_alloc_cache_put(cache, obj))
+ kfree(obj);
}
+
#endif
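For context, a hypothetical in-kernel user of the reworked cache API above pairs io_alloc_cache_init() with io_cache_alloc()/io_cache_free(); the struct, sizes, and helper names below are made up for illustration. Note that io_alloc_cache_init() returns false on success:

#include "alloc_cache.h"

struct my_obj {
	struct hlist_node node;
	void *payload;
	int state;
};

static struct io_alloc_cache my_cache;

static int my_cache_setup(void)
{
	/* init_bytes: prefix cleared on fresh allocation (and on cache hits under KASAN) */
	if (io_alloc_cache_init(&my_cache, IO_ALLOC_CACHE_MAX,
				sizeof(struct my_obj), sizeof(struct my_obj)))
		return -ENOMEM;
	return 0;
}

static struct my_obj *my_obj_get(void)
{
	/* pops a cached entry, or falls back to io_cache_alloc_new() */
	return io_cache_alloc(&my_cache, GFP_KERNEL);
}

static void my_obj_put(struct my_obj *obj)
{
	/* recycles into the cache while below max_cached, else kfree()s */
	io_cache_free(&my_cache, obj);
}

static void my_cache_teardown(void)
{
	io_alloc_cache_free(&my_cache, kfree);
}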
diff --git a/io_uring/cancel.c b/io_uring/cancel.c
index b4f5dfacc0c3..ca12ac10c0ae 100644
--- a/io_uring/cancel.c
+++ b/io_uring/cancel.c
@@ -11,10 +11,15 @@
#include <uapi/linux/io_uring.h>
+#include "filetable.h"
#include "io_uring.h"
#include "tctx.h"
+#include "sqpoll.h"
+#include "uring_cmd.h"
#include "poll.h"
#include "timeout.h"
+#include "waitid.h"
+#include "futex.h"
#include "cancel.h"
struct io_cancel {
@@ -22,35 +27,55 @@ struct io_cancel {
u64 addr;
u32 flags;
s32 fd;
+ u8 opcode;
};
#define CANCEL_FLAGS (IORING_ASYNC_CANCEL_ALL | IORING_ASYNC_CANCEL_FD | \
- IORING_ASYNC_CANCEL_ANY | IORING_ASYNC_CANCEL_FD_FIXED)
+ IORING_ASYNC_CANCEL_ANY | IORING_ASYNC_CANCEL_FD_FIXED | \
+ IORING_ASYNC_CANCEL_USERDATA | IORING_ASYNC_CANCEL_OP)
-static bool io_cancel_cb(struct io_wq_work *work, void *data)
+/*
+ * Returns true if the request matches the criteria outlined by 'cd'.
+ */
+bool io_cancel_req_match(struct io_kiocb *req, struct io_cancel_data *cd)
{
- struct io_kiocb *req = container_of(work, struct io_kiocb, work);
- struct io_cancel_data *cd = data;
+ bool match_user_data = cd->flags & IORING_ASYNC_CANCEL_USERDATA;
if (req->ctx != cd->ctx)
return false;
- if (cd->flags & IORING_ASYNC_CANCEL_ANY) {
- ;
- } else if (cd->flags & IORING_ASYNC_CANCEL_FD) {
+
+ if (!(cd->flags & (IORING_ASYNC_CANCEL_FD | IORING_ASYNC_CANCEL_OP)))
+ match_user_data = true;
+
+ if (cd->flags & IORING_ASYNC_CANCEL_ANY)
+ goto check_seq;
+ if (cd->flags & IORING_ASYNC_CANCEL_FD) {
if (req->file != cd->file)
return false;
- } else {
- if (req->cqe.user_data != cd->data)
+ }
+ if (cd->flags & IORING_ASYNC_CANCEL_OP) {
+ if (req->opcode != cd->opcode)
return false;
}
- if (cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY)) {
- if (cd->seq == req->work.cancel_seq)
+ if (match_user_data && req->cqe.user_data != cd->data)
+ return false;
+ if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
+check_seq:
+ if (io_cancel_match_sequence(req, cd->seq))
return false;
- req->work.cancel_seq = cd->seq;
}
+
return true;
}
+static bool io_cancel_cb(struct io_wq_work *work, void *data)
+{
+ struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+ struct io_cancel_data *cd = data;
+
+ return io_cancel_req_match(req, cd);
+}
+
static int io_async_cancel_one(struct io_uring_task *tctx,
struct io_cancel_data *cd)
{
@@ -98,6 +123,14 @@ int io_try_cancel(struct io_uring_task *tctx, struct io_cancel_data *cd,
if (ret != -ENOENT)
return ret;
+ ret = io_waitid_cancel(ctx, cd, issue_flags);
+ if (ret != -ENOENT)
+ return ret;
+
+ ret = io_futex_cancel(ctx, cd, issue_flags);
+ if (ret != -ENOENT)
+ return ret;
+
spin_lock(&ctx->completion_lock);
if (!(cd->flags & IORING_ASYNC_CANCEL_FD))
ret = io_timeout_cancel(ctx, cd);
@@ -111,7 +144,7 @@ int io_async_cancel_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
if (unlikely(req->flags & REQ_F_BUFFER_SELECT))
return -EINVAL;
- if (sqe->off || sqe->len || sqe->splice_fd_in)
+ if (sqe->off || sqe->splice_fd_in)
return -EINVAL;
cancel->addr = READ_ONCE(sqe->addr);
@@ -123,6 +156,11 @@ int io_async_cancel_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return -EINVAL;
cancel->fd = READ_ONCE(sqe->fd);
}
+ if (cancel->flags & IORING_ASYNC_CANCEL_OP) {
+ if (cancel->flags & IORING_ASYNC_CANCEL_ANY)
+ return -EINVAL;
+ cancel->opcode = READ_ONCE(sqe->len);
+ }
return 0;
}
@@ -149,9 +187,7 @@ static int __io_async_cancel(struct io_cancel_data *cd,
io_ring_submit_lock(ctx, issue_flags);
ret = -ENOENT;
list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
- struct io_uring_task *tctx = node->task->io_uring;
-
- ret = io_async_cancel_one(tctx, cd);
+ ret = io_async_cancel_one(node->task->io_uring, cd);
if (ret != -ENOENT) {
if (!all)
break;
@@ -169,9 +205,10 @@ int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
.ctx = req->ctx,
.data = cancel->addr,
.flags = cancel->flags,
+ .opcode = cancel->opcode,
.seq = atomic_inc_return(&req->ctx->cancel_seq),
};
- struct io_uring_task *tctx = req->task->io_uring;
+ struct io_uring_task *tctx = req->tctx;
int ret;
if (cd.flags & IORING_ASYNC_CANCEL_FD) {
@@ -195,17 +232,7 @@ done:
if (ret < 0)
req_set_fail(req);
io_req_set_res(req, ret, 0);
- return IOU_OK;
-}
-
-void init_hash_table(struct io_hash_table *table, unsigned size)
-{
- unsigned int i;
-
- for (i = 0; i < size; i++) {
- spin_lock_init(&table->hbs[i].lock);
- INIT_HLIST_HEAD(&table->hbs[i].list);
- }
+ return IOU_COMPLETE;
}
static int __io_sync_cancel(struct io_uring_task *tctx,
@@ -216,13 +243,12 @@ static int __io_sync_cancel(struct io_uring_task *tctx,
/* fixed must be grabbed every time since we drop the uring_lock */
if ((cd->flags & IORING_ASYNC_CANCEL_FD) &&
(cd->flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
- unsigned long file_ptr;
+ struct io_rsrc_node *node;
- if (unlikely(fd >= ctx->nr_user_files))
+ node = io_rsrc_node_lookup(&ctx->file_table.data, fd);
+ if (unlikely(!node))
return -EBADF;
- fd = array_index_nospec(fd, ctx->nr_user_files);
- file_ptr = io_fixed_file_slot(&ctx->file_table, fd)->file_ptr;
- cd->file = (struct file *) (file_ptr & FFS_MASK);
+ cd->file = io_slot_file(node);
if (!cd->file)
return -EBADF;
}
@@ -239,27 +265,32 @@ int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg)
};
ktime_t timeout = KTIME_MAX;
struct io_uring_sync_cancel_reg sc;
- struct fd f = { };
+ struct file *file = NULL;
DEFINE_WAIT(wait);
- int ret;
+ int ret, i;
if (copy_from_user(&sc, arg, sizeof(sc)))
return -EFAULT;
if (sc.flags & ~CANCEL_FLAGS)
return -EINVAL;
- if (sc.pad[0] || sc.pad[1] || sc.pad[2] || sc.pad[3])
- return -EINVAL;
+ for (i = 0; i < ARRAY_SIZE(sc.pad); i++)
+ if (sc.pad[i])
+ return -EINVAL;
+ for (i = 0; i < ARRAY_SIZE(sc.pad2); i++)
+ if (sc.pad2[i])
+ return -EINVAL;
cd.data = sc.addr;
cd.flags = sc.flags;
+ cd.opcode = sc.opcode;
/* we can grab a normal file descriptor upfront */
if ((cd.flags & IORING_ASYNC_CANCEL_FD) &&
!(cd.flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
- f = fdget(sc.fd);
- if (!f.file)
+ file = fget(sc.fd);
+ if (!file)
return -EBADF;
- cd.file = f.file;
+ cd.file = file;
}
ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);
@@ -309,6 +340,317 @@ int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg)
if (ret == -ENOENT || ret > 0)
ret = 0;
out:
- fdput(f);
+ if (file)
+ fput(file);
return ret;
}
+
+bool io_cancel_remove_all(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
+ struct hlist_head *list, bool cancel_all,
+ bool (*cancel)(struct io_kiocb *))
+{
+ struct hlist_node *tmp;
+ struct io_kiocb *req;
+ bool found = false;
+
+ lockdep_assert_held(&ctx->uring_lock);
+
+ hlist_for_each_entry_safe(req, tmp, list, hash_node) {
+ if (!io_match_task_safe(req, tctx, cancel_all))
+ continue;
+ hlist_del_init(&req->hash_node);
+ if (cancel(req))
+ found = true;
+ }
+
+ return found;
+}
+
+int io_cancel_remove(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
+ unsigned int issue_flags, struct hlist_head *list,
+ bool (*cancel)(struct io_kiocb *))
+{
+ struct hlist_node *tmp;
+ struct io_kiocb *req;
+ int nr = 0;
+
+ io_ring_submit_lock(ctx, issue_flags);
+ hlist_for_each_entry_safe(req, tmp, list, hash_node) {
+ if (!io_cancel_req_match(req, cd))
+ continue;
+ if (cancel(req))
+ nr++;
+ if (!(cd->flags & IORING_ASYNC_CANCEL_ALL))
+ break;
+ }
+ io_ring_submit_unlock(ctx, issue_flags);
+ return nr ?: -ENOENT;
+}
+
+static bool io_match_linked(struct io_kiocb *head)
+{
+ struct io_kiocb *req;
+
+ io_for_each_link(req, head) {
+ if (req->flags & REQ_F_INFLIGHT)
+ return true;
+ }
+ return false;
+}
+
+/*
+ * As io_match_task() but protected against racing with linked timeouts.
+ * User must not hold timeout_lock.
+ */
+bool io_match_task_safe(struct io_kiocb *head, struct io_uring_task *tctx,
+ bool cancel_all)
+{
+ bool matched;
+
+ if (tctx && head->tctx != tctx)
+ return false;
+ if (cancel_all)
+ return true;
+
+ if (head->flags & REQ_F_LINK_TIMEOUT) {
+ struct io_ring_ctx *ctx = head->ctx;
+
+ /* protect against races with linked timeouts */
+ raw_spin_lock_irq(&ctx->timeout_lock);
+ matched = io_match_linked(head);
+ raw_spin_unlock_irq(&ctx->timeout_lock);
+ } else {
+ matched = io_match_linked(head);
+ }
+ return matched;
+}
+
+void __io_uring_cancel(bool cancel_all)
+{
+ io_uring_unreg_ringfd();
+ io_uring_cancel_generic(cancel_all, NULL);
+}
+
+struct io_task_cancel {
+ struct io_uring_task *tctx;
+ bool all;
+};
+
+static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
+{
+ struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+ struct io_task_cancel *cancel = data;
+
+ return io_match_task_safe(req, cancel->tctx, cancel->all);
+}
+
+static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx,
+ struct io_uring_task *tctx,
+ bool cancel_all)
+{
+ struct io_defer_entry *de;
+ LIST_HEAD(list);
+
+ list_for_each_entry_reverse(de, &ctx->defer_list, list) {
+ if (io_match_task_safe(de->req, tctx, cancel_all)) {
+ list_cut_position(&list, &ctx->defer_list, &de->list);
+ break;
+ }
+ }
+ if (list_empty(&list))
+ return false;
+
+ while (!list_empty(&list)) {
+ de = list_first_entry(&list, struct io_defer_entry, list);
+ list_del_init(&de->list);
+ ctx->nr_drained -= io_linked_nr(de->req);
+ io_req_task_queue_fail(de->req, -ECANCELED);
+ kfree(de);
+ }
+ return true;
+}
+
+__cold bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
+{
+ struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+
+ return req->ctx == data;
+}
+
+static __cold bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
+{
+ struct io_tctx_node *node;
+ enum io_wq_cancel cret;
+ bool ret = false;
+
+ mutex_lock(&ctx->uring_lock);
+ list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
+ struct io_uring_task *tctx = node->task->io_uring;
+
+ /*
+ * io_wq will stay alive while we hold uring_lock, because it's
+ * killed after ctx nodes, which requires to take the lock.
+ */
+ if (!tctx || !tctx->io_wq)
+ continue;
+ cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb, ctx, true);
+ ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
+ }
+ mutex_unlock(&ctx->uring_lock);
+
+ return ret;
+}
+
+__cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
+ struct io_uring_task *tctx,
+ bool cancel_all, bool is_sqpoll_thread)
+{
+ struct io_task_cancel cancel = { .tctx = tctx, .all = cancel_all, };
+ enum io_wq_cancel cret;
+ bool ret = false;
+
+ /* set it so io_req_local_work_add() would wake us up */
+ if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
+ atomic_set(&ctx->cq_wait_nr, 1);
+ smp_mb();
+ }
+
+ /* failed during ring init, it couldn't have issued any requests */
+ if (!ctx->rings)
+ return false;
+
+ if (!tctx) {
+ ret |= io_uring_try_cancel_iowq(ctx);
+ } else if (tctx->io_wq) {
+ /*
+ * Cancels requests of all rings, not only @ctx, but
+ * it's fine as the task is in exit/exec.
+ */
+ cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb,
+ &cancel, true);
+ ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
+ }
+
+ /* SQPOLL thread does its own polling */
+ if ((!(ctx->flags & IORING_SETUP_SQPOLL) && cancel_all) ||
+ is_sqpoll_thread) {
+ while (!wq_list_empty(&ctx->iopoll_list)) {
+ io_iopoll_try_reap_events(ctx);
+ ret = true;
+ cond_resched();
+ }
+ }
+
+ if ((ctx->flags & IORING_SETUP_DEFER_TASKRUN) &&
+ io_allowed_defer_tw_run(ctx))
+ ret |= io_run_local_work(ctx, INT_MAX, INT_MAX) > 0;
+ mutex_lock(&ctx->uring_lock);
+ ret |= io_cancel_defer_files(ctx, tctx, cancel_all);
+ ret |= io_poll_remove_all(ctx, tctx, cancel_all);
+ ret |= io_waitid_remove_all(ctx, tctx, cancel_all);
+ ret |= io_futex_remove_all(ctx, tctx, cancel_all);
+ ret |= io_uring_try_cancel_uring_cmd(ctx, tctx, cancel_all);
+ mutex_unlock(&ctx->uring_lock);
+ ret |= io_kill_timeouts(ctx, tctx, cancel_all);
+ if (tctx)
+ ret |= io_run_task_work() > 0;
+ else
+ ret |= flush_delayed_work(&ctx->fallback_work);
+ return ret;
+}
+
+static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
+{
+ if (tracked)
+ return atomic_read(&tctx->inflight_tracked);
+ return percpu_counter_sum(&tctx->inflight);
+}
+
+/*
+ * Find any io_uring ctx that this task has registered or done IO on, and cancel
+ * requests. @sqd should be not-null IFF it's an SQPOLL thread cancellation.
+ */
+__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
+{
+ struct io_uring_task *tctx = current->io_uring;
+ struct io_ring_ctx *ctx;
+ struct io_tctx_node *node;
+ unsigned long index;
+ s64 inflight;
+ DEFINE_WAIT(wait);
+
+ WARN_ON_ONCE(sqd && sqpoll_task_locked(sqd) != current);
+
+ if (!current->io_uring)
+ return;
+ if (tctx->io_wq)
+ io_wq_exit_start(tctx->io_wq);
+
+ atomic_inc(&tctx->in_cancel);
+ do {
+ bool loop = false;
+
+ io_uring_drop_tctx_refs(current);
+ if (!tctx_inflight(tctx, !cancel_all))
+ break;
+
+ /* read completions before cancelations */
+ inflight = tctx_inflight(tctx, false);
+ if (!inflight)
+ break;
+
+ if (!sqd) {
+ xa_for_each(&tctx->xa, index, node) {
+ /* sqpoll task will cancel all its requests */
+ if (node->ctx->sq_data)
+ continue;
+ loop |= io_uring_try_cancel_requests(node->ctx,
+ current->io_uring,
+ cancel_all,
+ false);
+ }
+ } else {
+ list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
+ loop |= io_uring_try_cancel_requests(ctx,
+ current->io_uring,
+ cancel_all,
+ true);
+ }
+
+ if (loop) {
+ cond_resched();
+ continue;
+ }
+
+ prepare_to_wait(&tctx->wait, &wait, TASK_INTERRUPTIBLE);
+ io_run_task_work();
+ io_uring_drop_tctx_refs(current);
+ xa_for_each(&tctx->xa, index, node) {
+ if (io_local_work_pending(node->ctx)) {
+ WARN_ON_ONCE(node->ctx->submitter_task &&
+ node->ctx->submitter_task != current);
+ goto end_wait;
+ }
+ }
+ /*
+ * If we've seen completions, retry without waiting. This
+ * avoids a race where a completion comes in before we did
+ * prepare_to_wait().
+ */
+ if (inflight == tctx_inflight(tctx, !cancel_all))
+ schedule();
+end_wait:
+ finish_wait(&tctx->wait, &wait);
+ } while (1);
+
+ io_uring_clean_tctx(tctx);
+ if (cancel_all) {
+ /*
+ * We shouldn't run task_works after cancel, so just leave
+ * ->in_cancel set for normal exit.
+ */
+ atomic_dec(&tctx->in_cancel);
+ /* for exec all current's requests should be gone, kill tctx */
+ __io_uring_free(current);
+ }
+}
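With IORING_ASYNC_CANCEL_OP, io_async_cancel_prep() above reads the target opcode from sqe->len and io_cancel_req_match() compares it against each pending request. A userspace sketch, not part of this patch, assuming liburing (cancel_all_of_op is a made-up helper name):

#include <liburing.h>
#include <string.h>

static int cancel_all_of_op(struct io_uring *ring, __u8 op)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int ret;

	if (!sqe)
		return -EBUSY;
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_ASYNC_CANCEL;
	sqe->fd = -1;
	sqe->cancel_flags = IORING_ASYNC_CANCEL_OP | IORING_ASYNC_CANCEL_ALL;
	sqe->len = op;			/* e.g. IORING_OP_POLL_ADD */

	io_uring_submit(ring);
	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret)
		return ret;
	ret = cqe->res;			/* number of requests cancelled, or -errno */
	io_uring_cqe_seen(ring, cqe);
	return ret;
}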
diff --git a/io_uring/cancel.h b/io_uring/cancel.h
index 6a59ee484d0c..6783961ede1b 100644
--- a/io_uring/cancel.h
+++ b/io_uring/cancel.h
@@ -1,4 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
+#ifndef IORING_CANCEL_H
+#define IORING_CANCEL_H
#include <linux/io_uring_types.h>
@@ -8,16 +10,42 @@ struct io_cancel_data {
u64 data;
struct file *file;
};
+ u8 opcode;
u32 flags;
int seq;
};
-
int io_async_cancel_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags);
int io_try_cancel(struct io_uring_task *tctx, struct io_cancel_data *cd,
unsigned int issue_flags);
-void init_hash_table(struct io_hash_table *table, unsigned size);
int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg);
+bool io_cancel_req_match(struct io_kiocb *req, struct io_cancel_data *cd);
+bool io_match_task_safe(struct io_kiocb *head, struct io_uring_task *tctx,
+ bool cancel_all);
+
+bool io_cancel_remove_all(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
+ struct hlist_head *list, bool cancel_all,
+ bool (*cancel)(struct io_kiocb *));
+int io_cancel_remove(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
+ unsigned int issue_flags, struct hlist_head *list,
+ bool (*cancel)(struct io_kiocb *));
+__cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
+ struct io_uring_task *tctx,
+ bool cancel_all, bool is_sqpoll_thread);
+__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
+__cold bool io_cancel_ctx_cb(struct io_wq_work *work, void *data);
+
+static inline bool io_cancel_match_sequence(struct io_kiocb *req, int sequence)
+{
+ if (req->cancel_seq_set && sequence == req->work.cancel_seq)
+ return true;
+
+ req->cancel_seq_set = true;
+ req->work.cancel_seq = sequence;
+ return false;
+}
+
+#endif
diff --git a/io_uring/cmd_net.c b/io_uring/cmd_net.c
new file mode 100644
index 000000000000..19d3ce2bd20a
--- /dev/null
+++ b/io_uring/cmd_net.c
@@ -0,0 +1,188 @@
+#include <asm/ioctls.h>
+#include <linux/io_uring/net.h>
+#include <linux/errqueue.h>
+#include <net/sock.h>
+
+#include "uring_cmd.h"
+#include "io_uring.h"
+
+static inline int io_uring_cmd_getsockopt(struct socket *sock,
+ struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
+{
+ const struct io_uring_sqe *sqe = cmd->sqe;
+ bool compat = !!(issue_flags & IO_URING_F_COMPAT);
+ int optlen, optname, level, err;
+ void __user *optval;
+
+ level = READ_ONCE(sqe->level);
+ if (level != SOL_SOCKET)
+ return -EOPNOTSUPP;
+
+ optval = u64_to_user_ptr(READ_ONCE(sqe->optval));
+ optname = READ_ONCE(sqe->optname);
+ optlen = READ_ONCE(sqe->optlen);
+
+ err = do_sock_getsockopt(sock, compat, level, optname,
+ USER_SOCKPTR(optval),
+ KERNEL_SOCKPTR(&optlen));
+ if (err)
+ return err;
+
+ /* On success, return optlen */
+ return optlen;
+}
+
+static inline int io_uring_cmd_setsockopt(struct socket *sock,
+ struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
+{
+ const struct io_uring_sqe *sqe = cmd->sqe;
+ bool compat = !!(issue_flags & IO_URING_F_COMPAT);
+ int optname, optlen, level;
+ void __user *optval;
+ sockptr_t optval_s;
+
+ optval = u64_to_user_ptr(READ_ONCE(sqe->optval));
+ optname = READ_ONCE(sqe->optname);
+ optlen = READ_ONCE(sqe->optlen);
+ level = READ_ONCE(sqe->level);
+ optval_s = USER_SOCKPTR(optval);
+
+ return do_sock_setsockopt(sock, compat, level, optname, optval_s,
+ optlen);
+}
+
+static bool io_process_timestamp_skb(struct io_uring_cmd *cmd, struct sock *sk,
+ struct sk_buff *skb, unsigned issue_flags)
+{
+ struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
+ struct io_uring_cqe cqe[2];
+ struct io_timespec *iots;
+ struct timespec64 ts;
+ u32 tstype, tskey;
+ int ret;
+
+ BUILD_BUG_ON(sizeof(struct io_uring_cqe) != sizeof(struct io_timespec));
+
+ ret = skb_get_tx_timestamp(skb, sk, &ts);
+ if (ret < 0)
+ return false;
+
+ tskey = serr->ee.ee_data;
+ tstype = serr->ee.ee_info;
+
+ cqe->user_data = 0;
+ cqe->res = tskey;
+ cqe->flags = IORING_CQE_F_MORE | ctx_cqe32_flags(cmd_to_io_kiocb(cmd)->ctx);
+ cqe->flags |= tstype << IORING_TIMESTAMP_TYPE_SHIFT;
+ if (ret == SOF_TIMESTAMPING_TX_HARDWARE)
+ cqe->flags |= IORING_CQE_F_TSTAMP_HW;
+
+ iots = (struct io_timespec *)&cqe[1];
+ iots->tv_sec = ts.tv_sec;
+ iots->tv_nsec = ts.tv_nsec;
+ return io_uring_cmd_post_mshot_cqe32(cmd, issue_flags, cqe);
+}
+
+static int io_uring_cmd_timestamp(struct socket *sock,
+ struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
+{
+ struct sock *sk = sock->sk;
+ struct sk_buff_head *q = &sk->sk_error_queue;
+ struct sk_buff *skb, *tmp;
+ struct sk_buff_head list;
+ int ret;
+
+ if (!(issue_flags & IO_URING_F_CQE32))
+ return -EINVAL;
+ ret = io_cmd_poll_multishot(cmd, issue_flags, EPOLLERR);
+ if (unlikely(ret))
+ return ret;
+
+ if (skb_queue_empty_lockless(q))
+ return -EAGAIN;
+ __skb_queue_head_init(&list);
+
+ scoped_guard(spinlock_irq, &q->lock) {
+ skb_queue_walk_safe(q, skb, tmp) {
+ /* don't support skbs with payload */
+ if (!skb_has_tx_timestamp(skb, sk) || skb->len)
+ continue;
+ __skb_unlink(skb, q);
+ __skb_queue_tail(&list, skb);
+ }
+ }
+
+ while (1) {
+ skb = skb_peek(&list);
+ if (!skb)
+ break;
+ if (!io_process_timestamp_skb(cmd, sk, skb, issue_flags))
+ break;
+ __skb_dequeue(&list);
+ consume_skb(skb);
+ }
+
+ if (!unlikely(skb_queue_empty(&list))) {
+ scoped_guard(spinlock_irqsave, &q->lock)
+ skb_queue_splice(&list, q);
+ }
+ return -EAGAIN;
+}
+
+static int io_uring_cmd_getsockname(struct socket *sock,
+ struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
+{
+ const struct io_uring_sqe *sqe = cmd->sqe;
+ struct sockaddr __user *uaddr;
+ unsigned int peer;
+ int __user *ulen;
+
+ if (sqe->ioprio || sqe->__pad1 || sqe->len || sqe->rw_flags)
+ return -EINVAL;
+
+ uaddr = u64_to_user_ptr(READ_ONCE(sqe->addr));
+ ulen = u64_to_user_ptr(sqe->addr3);
+ peer = READ_ONCE(sqe->optlen);
+ if (peer > 1)
+ return -EINVAL;
+ return do_getsockname(sock, peer, uaddr, ulen);
+}
+
+int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags)
+{
+ struct socket *sock = cmd->file->private_data;
+ struct sock *sk = sock->sk;
+ struct proto *prot = READ_ONCE(sk->sk_prot);
+ int ret, arg = 0;
+
+ if (!prot || !prot->ioctl)
+ return -EOPNOTSUPP;
+
+ switch (cmd->cmd_op) {
+ case SOCKET_URING_OP_SIOCINQ:
+ ret = prot->ioctl(sk, SIOCINQ, &arg);
+ if (ret)
+ return ret;
+ return arg;
+ case SOCKET_URING_OP_SIOCOUTQ:
+ ret = prot->ioctl(sk, SIOCOUTQ, &arg);
+ if (ret)
+ return ret;
+ return arg;
+ case SOCKET_URING_OP_GETSOCKOPT:
+ return io_uring_cmd_getsockopt(sock, cmd, issue_flags);
+ case SOCKET_URING_OP_SETSOCKOPT:
+ return io_uring_cmd_setsockopt(sock, cmd, issue_flags);
+ case SOCKET_URING_OP_TX_TIMESTAMP:
+ return io_uring_cmd_timestamp(sock, cmd, issue_flags);
+ case SOCKET_URING_OP_GETSOCKNAME:
+ return io_uring_cmd_getsockname(sock, cmd, issue_flags);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+EXPORT_SYMBOL_GPL(io_uring_cmd_sock);
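The command ops dispatched by io_uring_cmd_sock() above are issued from userspace as IORING_OP_URING_CMD with cmd_op set on the SQE. A sketch, not part of this patch, querying unsent bytes on a socket via SOCKET_URING_OP_SIOCOUTQ (assumes liburing; sock_outq is a made-up helper):

#include <liburing.h>
#include <string.h>

static int sock_outq(struct io_uring *ring, int sockfd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int ret;

	if (!sqe)
		return -EBUSY;
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_URING_CMD;
	sqe->fd = sockfd;
	sqe->cmd_op = SOCKET_URING_OP_SIOCOUTQ;

	io_uring_submit(ring);
	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret)
		return ret;
	ret = cqe->res;			/* bytes not yet sent, or -errno */
	io_uring_cqe_seen(ring, cqe);
	return ret;
}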
diff --git a/io_uring/epoll.c b/io_uring/epoll.c
index 9aa74d2c80bc..8d4610246ba0 100644
--- a/io_uring/epoll.c
+++ b/io_uring/epoll.c
@@ -12,7 +12,6 @@
#include "io_uring.h"
#include "epoll.h"
-#if defined(CONFIG_EPOLL)
struct io_epoll {
struct file *file;
int epfd;
@@ -21,14 +20,16 @@ struct io_epoll {
struct epoll_event event;
};
+struct io_epoll_wait {
+ struct file *file;
+ int maxevents;
+ struct epoll_event __user *events;
+};
+
int io_epoll_ctl_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_epoll *epoll = io_kiocb_to_cmd(req, struct io_epoll);
- pr_warn_once("%s: epoll_ctl support in io_uring is deprecated and will "
- "be removed in a future Linux kernel version.\n",
- current->comm);
-
if (sqe->buf_index || sqe->splice_fd_in)
return -EINVAL;
@@ -60,6 +61,32 @@ int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags)
if (ret < 0)
req_set_fail(req);
io_req_set_res(req, ret, 0);
- return IOU_OK;
+ return IOU_COMPLETE;
+}
+
+int io_epoll_wait_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+ struct io_epoll_wait *iew = io_kiocb_to_cmd(req, struct io_epoll_wait);
+
+ if (sqe->off || sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
+ return -EINVAL;
+
+ iew->maxevents = READ_ONCE(sqe->len);
+ iew->events = u64_to_user_ptr(READ_ONCE(sqe->addr));
+ return 0;
+}
+
+int io_epoll_wait(struct io_kiocb *req, unsigned int issue_flags)
+{
+ struct io_epoll_wait *iew = io_kiocb_to_cmd(req, struct io_epoll_wait);
+ int ret;
+
+ ret = epoll_sendevents(req->file, iew->events, iew->maxevents);
+ if (ret == 0)
+ return -EAGAIN;
+ if (ret < 0)
+ req_set_fail(req);
+
+ io_req_set_res(req, ret, 0);
+ return IOU_COMPLETE;
}
-#endif
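io_epoll_wait_prep() above takes the destination array in sqe->addr and maxevents in sqe->len. A userspace sketch, not part of this patch, assuming liburing and the accompanying IORING_OP_EPOLL_WAIT opcode (epoll_wait_via_ring is a made-up helper):

#include <liburing.h>
#include <string.h>
#include <sys/epoll.h>

static int epoll_wait_via_ring(struct io_uring *ring, int epfd,
			       struct epoll_event *events, int maxevents)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int ret;

	if (!sqe)
		return -EBUSY;
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_EPOLL_WAIT;
	sqe->fd = epfd;
	sqe->addr = (unsigned long)events;
	sqe->len = maxevents;

	io_uring_submit(ring);
	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret)
		return ret;
	ret = cqe->res;			/* number of events copied, or -errno */
	io_uring_cqe_seen(ring, cqe);
	return ret;
}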
diff --git a/io_uring/epoll.h b/io_uring/epoll.h
index 870cce11ba98..4111997c360b 100644
--- a/io_uring/epoll.h
+++ b/io_uring/epoll.h
@@ -3,4 +3,6 @@
#if defined(CONFIG_EPOLL)
int io_epoll_ctl_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags);
+int io_epoll_wait_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
+int io_epoll_wait(struct io_kiocb *req, unsigned int issue_flags);
#endif
diff --git a/io_uring/eventfd.c b/io_uring/eventfd.c
new file mode 100644
index 000000000000..78f8ab7db104
--- /dev/null
+++ b/io_uring/eventfd.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/eventfd.h>
+#include <linux/eventpoll.h>
+#include <linux/io_uring.h>
+#include <linux/io_uring_types.h>
+
+#include "io-wq.h"
+#include "eventfd.h"
+
+struct io_ev_fd {
+ struct eventfd_ctx *cq_ev_fd;
+ unsigned int eventfd_async;
+ /* protected by ->completion_lock */
+ unsigned last_cq_tail;
+ refcount_t refs;
+ atomic_t ops;
+ struct rcu_head rcu;
+};
+
+enum {
+ IO_EVENTFD_OP_SIGNAL_BIT,
+};
+
+static void io_eventfd_free(struct rcu_head *rcu)
+{
+ struct io_ev_fd *ev_fd = container_of(rcu, struct io_ev_fd, rcu);
+
+ eventfd_ctx_put(ev_fd->cq_ev_fd);
+ kfree(ev_fd);
+}
+
+static void io_eventfd_put(struct io_ev_fd *ev_fd)
+{
+ if (refcount_dec_and_test(&ev_fd->refs))
+ call_rcu(&ev_fd->rcu, io_eventfd_free);
+}
+
+static void io_eventfd_do_signal(struct rcu_head *rcu)
+{
+ struct io_ev_fd *ev_fd = container_of(rcu, struct io_ev_fd, rcu);
+
+ eventfd_signal_mask(ev_fd->cq_ev_fd, EPOLL_URING_WAKE);
+ io_eventfd_put(ev_fd);
+}
+
+/*
+ * Returns true if the caller should put the ev_fd reference, false if not.
+ */
+static bool __io_eventfd_signal(struct io_ev_fd *ev_fd)
+{
+ if (eventfd_signal_allowed()) {
+ eventfd_signal_mask(ev_fd->cq_ev_fd, EPOLL_URING_WAKE);
+ return true;
+ }
+ if (!atomic_fetch_or(BIT(IO_EVENTFD_OP_SIGNAL_BIT), &ev_fd->ops)) {
+ call_rcu_hurry(&ev_fd->rcu, io_eventfd_do_signal);
+ return false;
+ }
+ return true;
+}
+
+/*
+ * Trigger if eventfd_async isn't set, or if it's set and the caller is
+ * an async worker.
+ */
+static bool io_eventfd_trigger(struct io_ev_fd *ev_fd)
+{
+ return !ev_fd->eventfd_async || io_wq_current_is_worker();
+}
+
+void io_eventfd_signal(struct io_ring_ctx *ctx, bool cqe_event)
+{
+ bool skip = false;
+ struct io_ev_fd *ev_fd;
+
+ if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
+ return;
+
+ guard(rcu)();
+ ev_fd = rcu_dereference(ctx->io_ev_fd);
+ /*
+ * Check again if ev_fd exists in case an io_eventfd_unregister call
+ * completed between the NULL check of ctx->io_ev_fd at the start of
+ * the function and rcu_read_lock.
+ */
+ if (!ev_fd)
+ return;
+ if (!io_eventfd_trigger(ev_fd) || !refcount_inc_not_zero(&ev_fd->refs))
+ return;
+
+ if (cqe_event) {
+ /*
+ * Eventfd should only get triggered when at least one event
+ * has been posted. Some applications rely on the eventfd
+ * notification count only changing IFF a new CQE has been
+ * added to the CQ ring. There's no dependency on 1:1
+ * relationship between how many times this function is called
+ * (and hence the eventfd count) and number of CQEs posted to
+ * the CQ ring.
+ */
+ spin_lock(&ctx->completion_lock);
+ skip = ctx->cached_cq_tail == ev_fd->last_cq_tail;
+ ev_fd->last_cq_tail = ctx->cached_cq_tail;
+ spin_unlock(&ctx->completion_lock);
+ }
+
+ if (skip || __io_eventfd_signal(ev_fd))
+ io_eventfd_put(ev_fd);
+}
+
+int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg,
+ unsigned int eventfd_async)
+{
+ struct io_ev_fd *ev_fd;
+ __s32 __user *fds = arg;
+ int fd;
+
+ ev_fd = rcu_dereference_protected(ctx->io_ev_fd,
+ lockdep_is_held(&ctx->uring_lock));
+ if (ev_fd)
+ return -EBUSY;
+
+ if (copy_from_user(&fd, fds, sizeof(*fds)))
+ return -EFAULT;
+
+ ev_fd = kmalloc(sizeof(*ev_fd), GFP_KERNEL);
+ if (!ev_fd)
+ return -ENOMEM;
+
+ ev_fd->cq_ev_fd = eventfd_ctx_fdget(fd);
+ if (IS_ERR(ev_fd->cq_ev_fd)) {
+ int ret = PTR_ERR(ev_fd->cq_ev_fd);
+
+ kfree(ev_fd);
+ return ret;
+ }
+
+ spin_lock(&ctx->completion_lock);
+ ev_fd->last_cq_tail = ctx->cached_cq_tail;
+ spin_unlock(&ctx->completion_lock);
+
+ ev_fd->eventfd_async = eventfd_async;
+ ctx->has_evfd = true;
+ refcount_set(&ev_fd->refs, 1);
+ atomic_set(&ev_fd->ops, 0);
+ rcu_assign_pointer(ctx->io_ev_fd, ev_fd);
+ return 0;
+}
+
+int io_eventfd_unregister(struct io_ring_ctx *ctx)
+{
+ struct io_ev_fd *ev_fd;
+
+ ev_fd = rcu_dereference_protected(ctx->io_ev_fd,
+ lockdep_is_held(&ctx->uring_lock));
+ if (ev_fd) {
+ ctx->has_evfd = false;
+ rcu_assign_pointer(ctx->io_ev_fd, NULL);
+ io_eventfd_put(ev_fd);
+ return 0;
+ }
+
+ return -ENXIO;
+}
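io_eventfd_register() above backs IORING_REGISTER_EVENTFD. A userspace sketch, not part of this patch, using liburing's io_uring_register_eventfd() wrapper (attach_eventfd is a made-up helper):

#include <liburing.h>
#include <sys/eventfd.h>
#include <unistd.h>
#include <errno.h>

static int attach_eventfd(struct io_uring *ring)
{
	int efd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);

	if (efd < 0)
		return -errno;
	/* issues IORING_REGISTER_EVENTFD under the hood */
	if (io_uring_register_eventfd(ring, efd)) {
		close(efd);
		return -1;
	}
	return efd;
}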
diff --git a/io_uring/eventfd.h b/io_uring/eventfd.h
new file mode 100644
index 000000000000..e2f1985c2cf9
--- /dev/null
+++ b/io_uring/eventfd.h
@@ -0,0 +1,7 @@
+
+struct io_ring_ctx;
+int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg,
+ unsigned int eventfd_async);
+int io_eventfd_unregister(struct io_ring_ctx *ctx);
+
+void io_eventfd_signal(struct io_ring_ctx *ctx, bool cqe_event);
diff --git a/io_uring/fdinfo.c b/io_uring/fdinfo.c
index 882bd56b01ed..a87d4e26eee8 100644
--- a/io_uring/fdinfo.c
+++ b/io_uring/fdinfo.c
@@ -5,53 +5,60 @@
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <linux/nospec.h>
#include <linux/io_uring.h>
#include <uapi/linux/io_uring.h>
-#include "io_uring.h"
+#include "filetable.h"
#include "sqpoll.h"
#include "fdinfo.h"
#include "cancel.h"
#include "rsrc.h"
+#include "opdef.h"
-#ifdef CONFIG_PROC_FS
-static __cold int io_uring_show_cred(struct seq_file *m, unsigned int id,
- const struct cred *cred)
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static __cold void common_tracking_show_fdinfo(struct io_ring_ctx *ctx,
+ struct seq_file *m,
+ const char *tracking_strategy)
{
- struct user_namespace *uns = seq_user_ns(m);
- struct group_info *gi;
- kernel_cap_t cap;
- unsigned __capi;
- int g;
-
- seq_printf(m, "%5d\n", id);
- seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
- seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
- seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
- seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
- seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
- seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
- seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
- seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
- seq_puts(m, "\n\tGroups:\t");
- gi = cred->group_info;
- for (g = 0; g < gi->ngroups; g++) {
- seq_put_decimal_ull(m, g ? " " : "",
- from_kgid_munged(uns, gi->gid[g]));
+ seq_puts(m, "NAPI:\tenabled\n");
+ seq_printf(m, "napi tracking:\t%s\n", tracking_strategy);
+ seq_printf(m, "napi_busy_poll_dt:\t%llu\n", ctx->napi_busy_poll_dt);
+ if (ctx->napi_prefer_busy_poll)
+ seq_puts(m, "napi_prefer_busy_poll:\ttrue\n");
+ else
+ seq_puts(m, "napi_prefer_busy_poll:\tfalse\n");
+}
+
+static __cold void napi_show_fdinfo(struct io_ring_ctx *ctx,
+ struct seq_file *m)
+{
+ unsigned int mode = READ_ONCE(ctx->napi_track_mode);
+
+ switch (mode) {
+ case IO_URING_NAPI_TRACKING_INACTIVE:
+ seq_puts(m, "NAPI:\tdisabled\n");
+ break;
+ case IO_URING_NAPI_TRACKING_DYNAMIC:
+ common_tracking_show_fdinfo(ctx, m, "dynamic");
+ break;
+ case IO_URING_NAPI_TRACKING_STATIC:
+ common_tracking_show_fdinfo(ctx, m, "static");
+ break;
+ default:
+ seq_printf(m, "NAPI:\tunknown mode (%u)\n", mode);
}
- seq_puts(m, "\n\tCapEff:\t");
- cap = cred->cap_effective;
- CAP_FOR_EACH_U32(__capi)
- seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
- seq_putc(m, '\n');
- return 0;
}
+#else
+static inline void napi_show_fdinfo(struct io_ring_ctx *ctx,
+ struct seq_file *m)
+{
+}
+#endif
-static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
- struct seq_file *m)
+static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
{
- struct io_sq_data *sq = NULL;
struct io_overflow_cqe *ocqe;
struct io_rings *r = ctx->rings;
unsigned int sq_mask = ctx->sq_entries - 1, cq_mask = ctx->cq_entries - 1;
@@ -59,14 +66,12 @@ static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
unsigned int sq_tail = READ_ONCE(r->sq.tail);
unsigned int cq_head = READ_ONCE(r->cq.head);
unsigned int cq_tail = READ_ONCE(r->cq.tail);
- unsigned int cq_shift = 0;
unsigned int sq_shift = 0;
- unsigned int sq_entries, cq_entries;
- bool has_lock;
+ unsigned int sq_entries;
+ int sq_pid = -1, sq_cpu = -1;
+ u64 sq_total_time = 0, sq_work_time = 0;
unsigned int i;
- if (ctx->flags & IORING_SETUP_CQE32)
- cq_shift = 1;
if (ctx->flags & IORING_SETUP_SQE128)
sq_shift = 1;
@@ -79,30 +84,57 @@ static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
seq_printf(m, "SqMask:\t0x%x\n", sq_mask);
seq_printf(m, "SqHead:\t%u\n", sq_head);
seq_printf(m, "SqTail:\t%u\n", sq_tail);
- seq_printf(m, "CachedSqHead:\t%u\n", ctx->cached_sq_head);
+ seq_printf(m, "CachedSqHead:\t%u\n", data_race(ctx->cached_sq_head));
seq_printf(m, "CqMask:\t0x%x\n", cq_mask);
seq_printf(m, "CqHead:\t%u\n", cq_head);
seq_printf(m, "CqTail:\t%u\n", cq_tail);
- seq_printf(m, "CachedCqTail:\t%u\n", ctx->cached_cq_tail);
+ seq_printf(m, "CachedCqTail:\t%u\n", data_race(ctx->cached_cq_tail));
seq_printf(m, "SQEs:\t%u\n", sq_tail - sq_head);
sq_entries = min(sq_tail - sq_head, ctx->sq_entries);
for (i = 0; i < sq_entries; i++) {
unsigned int entry = i + sq_head;
struct io_uring_sqe *sqe;
unsigned int sq_idx;
+ bool sqe128 = false;
+ u8 opcode;
- sq_idx = READ_ONCE(ctx->sq_array[entry & sq_mask]);
+ if (ctx->flags & IORING_SETUP_NO_SQARRAY)
+ sq_idx = entry & sq_mask;
+ else
+ sq_idx = READ_ONCE(ctx->sq_array[entry & sq_mask]);
if (sq_idx > sq_mask)
continue;
+
sqe = &ctx->sq_sqes[sq_idx << sq_shift];
+ opcode = READ_ONCE(sqe->opcode);
+ if (opcode >= IORING_OP_LAST)
+ continue;
+ opcode = array_index_nospec(opcode, IORING_OP_LAST);
+ if (sq_shift) {
+ sqe128 = true;
+ } else if (io_issue_defs[opcode].is_128) {
+ if (!(ctx->flags & IORING_SETUP_SQE_MIXED)) {
+ seq_printf(m,
+ "%5u: invalid sqe, 128B entry on non-mixed sq\n",
+ sq_idx);
+ break;
+ }
+ if ((++sq_head & sq_mask) == 0) {
+ seq_printf(m,
+ "%5u: corrupted sqe, wrapping 128B entry\n",
+ sq_idx);
+ break;
+ }
+ sqe128 = true;
+ }
seq_printf(m, "%5u: opcode:%s, fd:%d, flags:%x, off:%llu, "
"addr:0x%llx, rw_flags:0x%x, buf_index:%d "
"user_data:%llu",
- sq_idx, io_uring_get_opcode(sqe->opcode), sqe->fd,
+ sq_idx, io_uring_get_opcode(opcode), sqe->fd,
sqe->flags, (unsigned long long) sqe->off,
(unsigned long long) sqe->addr, sqe->rw_flags,
sqe->buf_index, sqe->user_data);
- if (sq_shift) {
+ if (sqe128) {
u64 *sqeb = (void *) (sqe + 1);
int size = sizeof(struct io_uring_sqe) / sizeof(u64);
int j;
@@ -116,83 +148,89 @@ static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
seq_printf(m, "\n");
}
seq_printf(m, "CQEs:\t%u\n", cq_tail - cq_head);
- cq_entries = min(cq_tail - cq_head, ctx->cq_entries);
- for (i = 0; i < cq_entries; i++) {
- unsigned int entry = i + cq_head;
- struct io_uring_cqe *cqe = &r->cqes[(entry & cq_mask) << cq_shift];
+ while (cq_head < cq_tail) {
+ struct io_uring_cqe *cqe;
+ bool cqe32 = false;
- seq_printf(m, "%5u: user_data:%llu, res:%d, flag:%x",
- entry & cq_mask, cqe->user_data, cqe->res,
+ cqe = &r->cqes[(cq_head & cq_mask)];
+ if (cqe->flags & IORING_CQE_F_32 || ctx->flags & IORING_SETUP_CQE32)
+ cqe32 = true;
+ seq_printf(m, "%5u: user_data:%llu, res:%d, flags:%x",
+ cq_head & cq_mask, cqe->user_data, cqe->res,
cqe->flags);
- if (cq_shift)
+ if (cqe32)
seq_printf(m, ", extra1:%llu, extra2:%llu\n",
cqe->big_cqe[0], cqe->big_cqe[1]);
seq_printf(m, "\n");
+ cq_head++;
+ if (cqe32)
+ cq_head++;
}
- /*
- * Avoid ABBA deadlock between the seq lock and the io_uring mutex,
- * since fdinfo case grabs it in the opposite direction of normal use
- * cases. If we fail to get the lock, we just don't iterate any
- * structures that could be going away outside the io_uring mutex.
- */
- has_lock = mutex_trylock(&ctx->uring_lock);
-
- if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) {
- sq = ctx->sq_data;
- if (!sq->thread)
- sq = NULL;
- }
+ if (ctx->flags & IORING_SETUP_SQPOLL) {
+ struct io_sq_data *sq = ctx->sq_data;
+ struct task_struct *tsk;
- seq_printf(m, "SqThread:\t%d\n", sq ? task_pid_nr(sq->thread) : -1);
- seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1);
- seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
- for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
- struct file *f = io_file_from_index(&ctx->file_table, i);
+ rcu_read_lock();
+ tsk = rcu_dereference(sq->thread);
+ /*
+ * sq->thread might be NULL if we raced with the sqpoll
+ * thread termination.
+ */
+ if (tsk) {
+ u64 usec;
- if (f)
- seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
- else
- seq_printf(m, "%5u: <none>\n", i);
+ get_task_struct(tsk);
+ rcu_read_unlock();
+ usec = io_sq_cpu_usec(tsk);
+ put_task_struct(tsk);
+ sq_pid = sq->task_pid;
+ sq_cpu = sq->sq_cpu;
+ sq_total_time = usec;
+ sq_work_time = sq->work_time;
+ } else {
+ rcu_read_unlock();
+ }
}
- seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
- for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) {
- struct io_mapped_ubuf *buf = ctx->user_bufs[i];
- unsigned int len = buf->ubuf_end - buf->ubuf;
- seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf, len);
+ seq_printf(m, "SqThread:\t%d\n", sq_pid);
+ seq_printf(m, "SqThreadCpu:\t%d\n", sq_cpu);
+ seq_printf(m, "SqTotalTime:\t%llu\n", sq_total_time);
+ seq_printf(m, "SqWorkTime:\t%llu\n", sq_work_time);
+ seq_printf(m, "UserFiles:\t%u\n", ctx->file_table.data.nr);
+ for (i = 0; i < ctx->file_table.data.nr; i++) {
+ struct file *f = NULL;
+
+ if (ctx->file_table.data.nodes[i])
+ f = io_slot_file(ctx->file_table.data.nodes[i]);
+ if (f) {
+ seq_printf(m, "%5u: ", i);
+ seq_file_path(m, f, " \t\n\\");
+ seq_puts(m, "\n");
+ }
}
- if (has_lock && !xa_empty(&ctx->personalities)) {
- unsigned long index;
- const struct cred *cred;
+ seq_printf(m, "UserBufs:\t%u\n", ctx->buf_table.nr);
+ for (i = 0; i < ctx->buf_table.nr; i++) {
+ struct io_mapped_ubuf *buf = NULL;
- seq_printf(m, "Personalities:\n");
- xa_for_each(&ctx->personalities, index, cred)
- io_uring_show_cred(m, index, cred);
+ if (ctx->buf_table.nodes[i])
+ buf = ctx->buf_table.nodes[i]->buf;
+ if (buf)
+ seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf, buf->len);
+ else
+ seq_printf(m, "%5u: <none>\n", i);
}
seq_puts(m, "PollList:\n");
for (i = 0; i < (1U << ctx->cancel_table.hash_bits); i++) {
struct io_hash_bucket *hb = &ctx->cancel_table.hbs[i];
- struct io_hash_bucket *hbl = &ctx->cancel_table_locked.hbs[i];
struct io_kiocb *req;
- spin_lock(&hb->lock);
hlist_for_each_entry(req, &hb->list, hash_node)
seq_printf(m, " op=%d, task_works=%d\n", req->opcode,
- task_work_pending(req->task));
- spin_unlock(&hb->lock);
-
- if (!has_lock)
- continue;
- hlist_for_each_entry(req, &hbl->list, hash_node)
- seq_printf(m, " op=%d, task_works=%d\n", req->opcode,
- task_work_pending(req->task));
+ task_work_pending(req->tctx->task));
}
- if (has_lock)
- mutex_unlock(&ctx->uring_lock);
-
seq_puts(m, "CqOverflowList:\n");
spin_lock(&ctx->completion_lock);
list_for_each_entry(ocqe, &ctx->cq_overflow_list, list) {
@@ -202,17 +240,25 @@ static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
cqe->user_data, cqe->res, cqe->flags);
}
-
spin_unlock(&ctx->completion_lock);
+ napi_show_fdinfo(ctx, m);
}
-__cold void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
+/*
+ * Caller holds a reference to the file already, we don't need to do
+ * anything else to get an extra reference.
+ */
+__cold void io_uring_show_fdinfo(struct seq_file *m, struct file *file)
{
- struct io_ring_ctx *ctx = f->private_data;
+ struct io_ring_ctx *ctx = file->private_data;
- if (percpu_ref_tryget(&ctx->refs)) {
+ /*
+ * Avoid ABBA deadlock between the seq lock and the io_uring mutex,
+ * since fdinfo case grabs it in the opposite direction of normal use
+ * cases.
+ */
+ if (mutex_trylock(&ctx->uring_lock)) {
__io_uring_show_fdinfo(ctx, m);
- percpu_ref_put(&ctx->refs);
+ mutex_unlock(&ctx->uring_lock);
}
}
-#endif
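The output built by __io_uring_show_fdinfo() above is exposed through procfs like any other fdinfo. A sketch, not part of this patch, that simply dumps it for a ring fd (dump_ring_fdinfo is a made-up helper):

#include <stdio.h>

static void dump_ring_fdinfo(int ring_fd)
{
	char path[64], line[256];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", ring_fd);
	f = fopen(path, "r");
	if (!f)
		return;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
}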
diff --git a/io_uring/filetable.c b/io_uring/filetable.c
index 68dfc6936aa7..794ef95df293 100644
--- a/io_uring/filetable.c
+++ b/io_uring/filetable.c
@@ -19,6 +19,9 @@ static int io_file_bitmap_get(struct io_ring_ctx *ctx)
unsigned long nr = ctx->file_alloc_end;
int ret;
+ if (!table->bitmap)
+ return -ENFILE;
+
do {
ret = find_next_zero_bit(table->bitmap, nr, table->alloc_hint);
if (ret != nr)
@@ -33,75 +36,48 @@ static int io_file_bitmap_get(struct io_ring_ctx *ctx)
return -ENFILE;
}
-bool io_alloc_file_tables(struct io_file_table *table, unsigned nr_files)
+bool io_alloc_file_tables(struct io_ring_ctx *ctx, struct io_file_table *table,
+ unsigned nr_files)
{
- table->files = kvcalloc(nr_files, sizeof(table->files[0]),
- GFP_KERNEL_ACCOUNT);
- if (unlikely(!table->files))
+ if (io_rsrc_data_alloc(&table->data, nr_files))
return false;
-
table->bitmap = bitmap_zalloc(nr_files, GFP_KERNEL_ACCOUNT);
- if (unlikely(!table->bitmap)) {
- kvfree(table->files);
- return false;
- }
-
- return true;
+ if (table->bitmap)
+ return true;
+ io_rsrc_data_free(ctx, &table->data);
+ return false;
}
-void io_free_file_tables(struct io_file_table *table)
+void io_free_file_tables(struct io_ring_ctx *ctx, struct io_file_table *table)
{
- kvfree(table->files);
+ io_rsrc_data_free(ctx, &table->data);
bitmap_free(table->bitmap);
- table->files = NULL;
table->bitmap = NULL;
}
static int io_install_fixed_file(struct io_ring_ctx *ctx, struct file *file,
u32 slot_index)
- __must_hold(&req->ctx->uring_lock)
+ __must_hold(&ctx->uring_lock)
{
- bool needs_switch = false;
- struct io_fixed_file *file_slot;
- int ret;
+ struct io_rsrc_node *node;
if (io_is_uring_fops(file))
return -EBADF;
- if (!ctx->file_data)
+ if (!ctx->file_table.data.nr)
return -ENXIO;
- if (slot_index >= ctx->nr_user_files)
+ if (slot_index >= ctx->file_table.data.nr)
return -EINVAL;
- slot_index = array_index_nospec(slot_index, ctx->nr_user_files);
- file_slot = io_fixed_file_slot(&ctx->file_table, slot_index);
-
- if (file_slot->file_ptr) {
- struct file *old_file;
+ node = io_rsrc_node_alloc(ctx, IORING_RSRC_FILE);
+ if (!node)
+ return -ENOMEM;
- ret = io_rsrc_node_switch_start(ctx);
- if (ret)
- goto err;
-
- old_file = (struct file *)(file_slot->file_ptr & FFS_MASK);
- ret = io_queue_rsrc_removal(ctx->file_data, slot_index,
- ctx->rsrc_node, old_file);
- if (ret)
- goto err;
- file_slot->file_ptr = 0;
- io_file_bitmap_clear(&ctx->file_table, slot_index);
- needs_switch = true;
- }
-
- ret = io_scm_file_account(ctx, file);
- if (!ret) {
- *io_get_tag_slot(ctx->file_data, slot_index) = 0;
- io_fixed_file_set(file_slot, file);
+ if (!io_reset_rsrc_node(ctx, &ctx->file_table.data, slot_index))
io_file_bitmap_set(&ctx->file_table, slot_index);
- }
-err:
- if (needs_switch)
- io_rsrc_node_switch(ctx, ctx->file_data);
- return ret;
+
+ ctx->file_table.data.nodes[slot_index] = node;
+ io_fixed_file_set(node, file);
+ return 0;
}
int __io_fixed_fd_install(struct io_ring_ctx *ctx, struct file *file,
@@ -145,31 +121,18 @@ int io_fixed_fd_install(struct io_kiocb *req, unsigned int issue_flags,
int io_fixed_fd_remove(struct io_ring_ctx *ctx, unsigned int offset)
{
- struct io_fixed_file *file_slot;
- struct file *file;
- int ret;
+ struct io_rsrc_node *node;
- if (unlikely(!ctx->file_data))
+ if (unlikely(!ctx->file_table.data.nr))
return -ENXIO;
- if (offset >= ctx->nr_user_files)
+ if (offset >= ctx->file_table.data.nr)
return -EINVAL;
- ret = io_rsrc_node_switch_start(ctx);
- if (ret)
- return ret;
- offset = array_index_nospec(offset, ctx->nr_user_files);
- file_slot = io_fixed_file_slot(&ctx->file_table, offset);
- if (!file_slot->file_ptr)
+ node = io_rsrc_node_lookup(&ctx->file_table.data, offset);
+ if (!node)
return -EBADF;
-
- file = (struct file *)(file_slot->file_ptr & FFS_MASK);
- ret = io_queue_rsrc_removal(ctx->file_data, offset, ctx->rsrc_node, file);
- if (ret)
- return ret;
-
- file_slot->file_ptr = 0;
+ io_reset_rsrc_node(ctx, &ctx->file_table.data, offset);
io_file_bitmap_clear(&ctx->file_table, offset);
- io_rsrc_node_switch(ctx, ctx->file_data);
return 0;
}
@@ -183,7 +146,7 @@ int io_register_file_alloc_range(struct io_ring_ctx *ctx,
return -EFAULT;
if (check_add_overflow(range.off, range.len, &end))
return -EOVERFLOW;
- if (range.resv || end > ctx->nr_user_files)
+ if (range.resv || end > ctx->file_table.data.nr)
return -EINVAL;
io_file_table_set_alloc_range(ctx, range.off, range.len);
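io_install_fixed_file() above is what ultimately services direct-descriptor installs, such as opening straight into a fixed-file slot. A userspace sketch, not part of this patch, assuming liburing's sparse file-table registration and openat-direct helpers (open_into_slot and the table size are made up):

#include <liburing.h>
#include <fcntl.h>

static int open_into_slot(struct io_uring *ring, const char *path, unsigned slot)
{
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int ret;

	/* reserve 16 empty fixed-file slots; size is arbitrary for the sketch */
	ret = io_uring_register_files_sparse(ring, 16);
	if (ret)
		return ret;

	sqe = io_uring_get_sqe(ring);
	if (!sqe)
		return -EBUSY;
	/* open directly into fixed slot 'slot' instead of a normal fd */
	io_uring_prep_openat_direct(sqe, AT_FDCWD, path, O_RDONLY, 0, slot);

	io_uring_submit(ring);
	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret)
		return ret;
	ret = cqe->res;			/* 0 on success, or -errno */
	io_uring_cqe_seen(ring, cqe);
	return ret;
}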
diff --git a/io_uring/filetable.h b/io_uring/filetable.h
index 351111ff8882..7717ea9efd0e 100644
--- a/io_uring/filetable.h
+++ b/io_uring/filetable.h
@@ -4,13 +4,10 @@
#include <linux/file.h>
#include <linux/io_uring_types.h>
+#include "rsrc.h"
-#define FFS_NOWAIT 0x1UL
-#define FFS_ISREG 0x2UL
-#define FFS_MASK ~(FFS_NOWAIT|FFS_ISREG)
-
-bool io_alloc_file_tables(struct io_file_table *table, unsigned nr_files);
-void io_free_file_tables(struct io_file_table *table);
+bool io_alloc_file_tables(struct io_ring_ctx *ctx, struct io_file_table *table, unsigned nr_files);
+void io_free_file_tables(struct io_ring_ctx *ctx, struct io_file_table *table);
int io_fixed_fd_install(struct io_kiocb *req, unsigned int issue_flags,
struct file *file, unsigned int file_slot);
@@ -21,7 +18,7 @@ int io_fixed_fd_remove(struct io_ring_ctx *ctx, unsigned int offset);
int io_register_file_alloc_range(struct io_ring_ctx *ctx,
struct io_uring_file_index_range __user *arg);
-unsigned int io_file_get_flags(struct file *file);
+io_req_flags_t io_file_get_flags(struct file *file);
static inline void io_file_bitmap_clear(struct io_file_table *table, int bit)
{
@@ -37,32 +34,26 @@ static inline void io_file_bitmap_set(struct io_file_table *table, int bit)
table->alloc_hint = bit + 1;
}
-static inline struct io_fixed_file *
-io_fixed_file_slot(struct io_file_table *table, unsigned i)
-{
- return &table->files[i];
-}
+#define FFS_NOWAIT 0x1UL
+#define FFS_ISREG 0x2UL
+#define FFS_MASK ~(FFS_NOWAIT|FFS_ISREG)
-static inline struct file *io_file_from_index(struct io_file_table *table,
- int index)
+static inline unsigned int io_slot_flags(struct io_rsrc_node *node)
{
- struct io_fixed_file *slot = io_fixed_file_slot(table, index);
- return (struct file *) (slot->file_ptr & FFS_MASK);
+ return (node->file_ptr & ~FFS_MASK) << REQ_F_SUPPORT_NOWAIT_BIT;
}
-static inline void io_fixed_file_set(struct io_fixed_file *file_slot,
- struct file *file)
+static inline struct file *io_slot_file(struct io_rsrc_node *node)
{
- unsigned long file_ptr = (unsigned long) file;
-
- file_ptr |= io_file_get_flags(file);
- file_slot->file_ptr = file_ptr;
+ return (struct file *)(node->file_ptr & FFS_MASK);
}
-static inline void io_reset_alloc_hint(struct io_ring_ctx *ctx)
+static inline void io_fixed_file_set(struct io_rsrc_node *node,
+ struct file *file)
{
- ctx->file_table.alloc_hint = ctx->file_alloc_start;
+ node->file_ptr = (unsigned long)file |
+ (io_file_get_flags(file) >> REQ_F_SUPPORT_NOWAIT_BIT);
}
static inline void io_file_table_set_alloc_range(struct io_ring_ctx *ctx,
@@ -70,7 +61,7 @@ static inline void io_file_table_set_alloc_range(struct io_ring_ctx *ctx,
{
ctx->file_alloc_start = off;
ctx->file_alloc_end = off + len;
- io_reset_alloc_hint(ctx);
+ ctx->file_table.alloc_hint = ctx->file_alloc_start;
}
#endif
diff --git a/io_uring/fs.c b/io_uring/fs.c
index 7100c293c13a..37079a414eab 100644
--- a/io_uring/fs.c
+++ b/io_uring/fs.c
@@ -74,6 +74,7 @@ int io_renameat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
}
req->flags |= REQ_F_NEED_CLEANUP;
+ req->flags |= REQ_F_FORCE_ASYNC;
return 0;
}
@@ -82,15 +83,14 @@ int io_renameat(struct io_kiocb *req, unsigned int issue_flags)
struct io_rename *ren = io_kiocb_to_cmd(req, struct io_rename);
int ret;
- if (issue_flags & IO_URING_F_NONBLOCK)
- return -EAGAIN;
+ WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
ret = do_renameat2(ren->old_dfd, ren->oldpath, ren->new_dfd,
ren->newpath, ren->flags);
req->flags &= ~REQ_F_NEED_CLEANUP;
io_req_set_res(req, ret, 0);
- return IOU_OK;
+ return IOU_COMPLETE;
}
void io_renameat_cleanup(struct io_kiocb *req)
@@ -123,6 +123,7 @@ int io_unlinkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return PTR_ERR(un->filename);
req->flags |= REQ_F_NEED_CLEANUP;
+ req->flags |= REQ_F_FORCE_ASYNC;
return 0;
}
@@ -131,8 +132,7 @@ int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags)
struct io_unlink *un = io_kiocb_to_cmd(req, struct io_unlink);
int ret;
- if (issue_flags & IO_URING_F_NONBLOCK)
- return -EAGAIN;
+ WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
if (un->flags & AT_REMOVEDIR)
ret = do_rmdir(un->dfd, un->filename);
@@ -141,7 +141,7 @@ int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags)
req->flags &= ~REQ_F_NEED_CLEANUP;
io_req_set_res(req, ret, 0);
- return IOU_OK;
+ return IOU_COMPLETE;
}
void io_unlinkat_cleanup(struct io_kiocb *req)
@@ -170,6 +170,7 @@ int io_mkdirat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return PTR_ERR(mkd->filename);
req->flags |= REQ_F_NEED_CLEANUP;
+ req->flags |= REQ_F_FORCE_ASYNC;
return 0;
}
@@ -178,14 +179,13 @@ int io_mkdirat(struct io_kiocb *req, unsigned int issue_flags)
struct io_mkdir *mkd = io_kiocb_to_cmd(req, struct io_mkdir);
int ret;
- if (issue_flags & IO_URING_F_NONBLOCK)
- return -EAGAIN;
+ WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
ret = do_mkdirat(mkd->dfd, mkd->filename, mkd->mode);
req->flags &= ~REQ_F_NEED_CLEANUP;
io_req_set_res(req, ret, 0);
- return IOU_OK;
+ return IOU_COMPLETE;
}
void io_mkdirat_cleanup(struct io_kiocb *req)
@@ -220,6 +220,7 @@ int io_symlinkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
}
req->flags |= REQ_F_NEED_CLEANUP;
+ req->flags |= REQ_F_FORCE_ASYNC;
return 0;
}
@@ -228,14 +229,13 @@ int io_symlinkat(struct io_kiocb *req, unsigned int issue_flags)
struct io_link *sl = io_kiocb_to_cmd(req, struct io_link);
int ret;
- if (issue_flags & IO_URING_F_NONBLOCK)
- return -EAGAIN;
+ WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
ret = do_symlinkat(sl->oldpath, sl->new_dfd, sl->newpath);
req->flags &= ~REQ_F_NEED_CLEANUP;
io_req_set_res(req, ret, 0);
- return IOU_OK;
+ return IOU_COMPLETE;
}
int io_linkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
@@ -243,7 +243,7 @@ int io_linkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
struct io_link *lnk = io_kiocb_to_cmd(req, struct io_link);
const char __user *oldf, *newf;
- if (sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
+ if (sqe->buf_index || sqe->splice_fd_in)
return -EINVAL;
if (unlikely(req->flags & REQ_F_FIXED_FILE))
return -EBADF;
@@ -254,7 +254,7 @@ int io_linkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
lnk->flags = READ_ONCE(sqe->hardlink_flags);
- lnk->oldpath = getname(oldf);
+ lnk->oldpath = getname_uflags(oldf, lnk->flags);
if (IS_ERR(lnk->oldpath))
return PTR_ERR(lnk->oldpath);
@@ -265,6 +265,7 @@ int io_linkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
}
req->flags |= REQ_F_NEED_CLEANUP;
+ req->flags |= REQ_F_FORCE_ASYNC;
return 0;
}
@@ -273,15 +274,14 @@ int io_linkat(struct io_kiocb *req, unsigned int issue_flags)
struct io_link *lnk = io_kiocb_to_cmd(req, struct io_link);
int ret;
- if (issue_flags & IO_URING_F_NONBLOCK)
- return -EAGAIN;
+ WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
ret = do_linkat(lnk->old_dfd, lnk->oldpath, lnk->new_dfd,
lnk->newpath, lnk->flags);
req->flags &= ~REQ_F_NEED_CLEANUP;
io_req_set_res(req, ret, 0);
- return IOU_OK;
+ return IOU_COMPLETE;
}
void io_link_cleanup(struct io_kiocb *req)
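
The fs.c conversion above follows one pattern throughout: prep marks the request REQ_F_FORCE_ASYNC, so the core never calls the handler with IO_URING_F_NONBLOCK, and the handler can drop its -EAGAIN bounce and merely warn if that assumption is ever violated. A condensed sketch of that pattern, not part of this diff and not standalone-buildable outside a kernel tree; the example_* names are hypothetical:

/* Hedged sketch of the force-async prep/issue split used by these handlers. */
static int example_blocking_prep(struct io_kiocb *req,
				 const struct io_uring_sqe *sqe)
{
	/* ...read and validate SQE fields, take name references... */
	req->flags |= REQ_F_NEED_CLEANUP;
	req->flags |= REQ_F_FORCE_ASYNC;	/* always issued from io-wq */
	return 0;
}

static int example_blocking_issue(struct io_kiocb *req, unsigned int issue_flags)
{
	int ret;

	/* force-async prep means the non-blocking path never reaches us */
	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);

	ret = example_do_blocking_work(req);	/* hypothetical blocking helper */
	req->flags &= ~REQ_F_NEED_CLEANUP;
	io_req_set_res(req, ret, 0);
	return IOU_COMPLETE;
}
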
diff --git a/io_uring/futex.c b/io_uring/futex.c
new file mode 100644
index 000000000000..11bfff5a80df
--- /dev/null
+++ b/io_uring/futex.c
@@ -0,0 +1,334 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/io_uring.h>
+
+#include <uapi/linux/io_uring.h>
+
+#include "../kernel/futex/futex.h"
+#include "io_uring.h"
+#include "alloc_cache.h"
+#include "futex.h"
+
+struct io_futex {
+ struct file *file;
+ void __user *uaddr;
+ unsigned long futex_val;
+ unsigned long futex_mask;
+ u32 futex_flags;
+ unsigned int futex_nr;
+ bool futexv_unqueued;
+};
+
+struct io_futex_data {
+ struct futex_q q;
+ struct io_kiocb *req;
+};
+
+struct io_futexv_data {
+ unsigned long owned;
+ struct futex_vector futexv[];
+};
+
+#define IO_FUTEX_ALLOC_CACHE_MAX 32
+
+bool io_futex_cache_init(struct io_ring_ctx *ctx)
+{
+ return io_alloc_cache_init(&ctx->futex_cache, IO_FUTEX_ALLOC_CACHE_MAX,
+ sizeof(struct io_futex_data), 0);
+}
+
+void io_futex_cache_free(struct io_ring_ctx *ctx)
+{
+ io_alloc_cache_free(&ctx->futex_cache, kfree);
+}
+
+static void __io_futex_complete(struct io_tw_req tw_req, io_tw_token_t tw)
+{
+ hlist_del_init(&tw_req.req->hash_node);
+ io_req_task_complete(tw_req, tw);
+}
+
+static void io_futex_complete(struct io_tw_req tw_req, io_tw_token_t tw)
+{
+ struct io_kiocb *req = tw_req.req;
+ struct io_ring_ctx *ctx = req->ctx;
+
+ io_tw_lock(ctx, tw);
+ io_cache_free(&ctx->futex_cache, req->async_data);
+ io_req_async_data_clear(req, 0);
+ __io_futex_complete(tw_req, tw);
+}
+
+static void io_futexv_complete(struct io_tw_req tw_req, io_tw_token_t tw)
+{
+ struct io_kiocb *req = tw_req.req;
+ struct io_futex *iof = io_kiocb_to_cmd(req, struct io_futex);
+ struct io_futexv_data *ifd = req->async_data;
+
+ io_tw_lock(req->ctx, tw);
+
+ if (!iof->futexv_unqueued) {
+ int res;
+
+ res = futex_unqueue_multiple(ifd->futexv, iof->futex_nr);
+ if (res != -1)
+ io_req_set_res(req, res, 0);
+ }
+
+ io_req_async_data_free(req);
+ __io_futex_complete(tw_req, tw);
+}
+
+static bool io_futexv_claim(struct io_futexv_data *ifd)
+{
+ if (test_bit(0, &ifd->owned) || test_and_set_bit_lock(0, &ifd->owned))
+ return false;
+ return true;
+}
+
+static bool __io_futex_cancel(struct io_kiocb *req)
+{
+ /* futex wake already done or in progress */
+ if (req->opcode == IORING_OP_FUTEX_WAIT) {
+ struct io_futex_data *ifd = req->async_data;
+
+ if (!futex_unqueue(&ifd->q))
+ return false;
+ req->io_task_work.func = io_futex_complete;
+ } else {
+ struct io_futexv_data *ifd = req->async_data;
+
+ if (!io_futexv_claim(ifd))
+ return false;
+ req->io_task_work.func = io_futexv_complete;
+ }
+
+ hlist_del_init(&req->hash_node);
+ io_req_set_res(req, -ECANCELED, 0);
+ io_req_task_work_add(req);
+ return true;
+}
+
+int io_futex_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
+ unsigned int issue_flags)
+{
+ return io_cancel_remove(ctx, cd, issue_flags, &ctx->futex_list, __io_futex_cancel);
+}
+
+bool io_futex_remove_all(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
+ bool cancel_all)
+{
+ return io_cancel_remove_all(ctx, tctx, &ctx->futex_list, cancel_all, __io_futex_cancel);
+}
+
+int io_futex_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+ struct io_futex *iof = io_kiocb_to_cmd(req, struct io_futex);
+ u32 flags;
+
+ if (unlikely(sqe->len || sqe->futex_flags || sqe->buf_index ||
+ sqe->file_index))
+ return -EINVAL;
+
+ iof->uaddr = u64_to_user_ptr(READ_ONCE(sqe->addr));
+ iof->futex_val = READ_ONCE(sqe->addr2);
+ iof->futex_mask = READ_ONCE(sqe->addr3);
+ flags = READ_ONCE(sqe->fd);
+
+ if (flags & ~FUTEX2_VALID_MASK)
+ return -EINVAL;
+
+ iof->futex_flags = futex2_to_flags(flags);
+ if (!futex_flags_valid(iof->futex_flags))
+ return -EINVAL;
+
+ if (!futex_validate_input(iof->futex_flags, iof->futex_val) ||
+ !futex_validate_input(iof->futex_flags, iof->futex_mask))
+ return -EINVAL;
+
+ /* Mark as inflight, so file exit cancelation will find it */
+ io_req_track_inflight(req);
+ return 0;
+}
+
+static void io_futex_wakev_fn(struct wake_q_head *wake_q, struct futex_q *q)
+{
+ struct io_kiocb *req = q->wake_data;
+ struct io_futexv_data *ifd = req->async_data;
+
+ if (!io_futexv_claim(ifd))
+ return;
+ if (unlikely(!__futex_wake_mark(q)))
+ return;
+
+ io_req_set_res(req, 0, 0);
+ req->io_task_work.func = io_futexv_complete;
+ io_req_task_work_add(req);
+}
+
+int io_futexv_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+ struct io_futex *iof = io_kiocb_to_cmd(req, struct io_futex);
+ struct io_futexv_data *ifd;
+ int ret;
+
+ /* No flags or mask supported for waitv */
+ if (unlikely(sqe->fd || sqe->buf_index || sqe->file_index ||
+ sqe->addr2 || sqe->futex_flags || sqe->addr3))
+ return -EINVAL;
+
+ iof->uaddr = u64_to_user_ptr(READ_ONCE(sqe->addr));
+ iof->futex_nr = READ_ONCE(sqe->len);
+ if (!iof->futex_nr || iof->futex_nr > FUTEX_WAITV_MAX)
+ return -EINVAL;
+
+ ifd = kzalloc(struct_size_t(struct io_futexv_data, futexv, iof->futex_nr),
+ GFP_KERNEL);
+ if (!ifd)
+ return -ENOMEM;
+
+ ret = futex_parse_waitv(ifd->futexv, iof->uaddr, iof->futex_nr,
+ io_futex_wakev_fn, req);
+ if (ret) {
+ kfree(ifd);
+ return ret;
+ }
+
+ /* Mark as inflight, so file exit cancelation will find it */
+ io_req_track_inflight(req);
+ iof->futexv_unqueued = 0;
+ req->flags |= REQ_F_ASYNC_DATA;
+ req->async_data = ifd;
+ return 0;
+}
+
+static void io_futex_wake_fn(struct wake_q_head *wake_q, struct futex_q *q)
+{
+ struct io_futex_data *ifd = container_of(q, struct io_futex_data, q);
+ struct io_kiocb *req = ifd->req;
+
+ if (unlikely(!__futex_wake_mark(q)))
+ return;
+
+ io_req_set_res(req, 0, 0);
+ req->io_task_work.func = io_futex_complete;
+ io_req_task_work_add(req);
+}
+
+int io_futexv_wait(struct io_kiocb *req, unsigned int issue_flags)
+{
+ struct io_futex *iof = io_kiocb_to_cmd(req, struct io_futex);
+ struct io_futexv_data *ifd = req->async_data;
+ struct io_ring_ctx *ctx = req->ctx;
+ int ret, woken = -1;
+
+ io_ring_submit_lock(ctx, issue_flags);
+
+ ret = futex_wait_multiple_setup(ifd->futexv, iof->futex_nr, &woken);
+
+ /*
+ * Error case, ret is < 0. Mark the request as failed.
+ */
+ if (unlikely(ret < 0)) {
+ io_ring_submit_unlock(ctx, issue_flags);
+ req_set_fail(req);
+ io_req_set_res(req, ret, 0);
+ io_req_async_data_free(req);
+ return IOU_COMPLETE;
+ }
+
+ /*
+ * 0 return means that we successfully setup the waiters, and that
+ * nobody triggered a wakeup while we were doing so. If the wakeup
+ * happened post setup, the task_work will be run post this issue and
+ * under the submission lock. 1 means We got woken while setting up,
+ * let that side do the completion. Note that
+ * futex_wait_multiple_setup() will have unqueued all the futexes in
+ * this case. Mark us as having done that already, since this is
+ * different from normal wakeup.
+ */
+ if (!ret) {
+ /*
+ * If futex_wait_multiple_setup() returns 0 for a
+ * successful setup, then the task state will not be
+ * runnable. This is fine for the sync syscall, as
+ * it'll be blocking unless we already got one of the
+ * futexes woken, but it obviously won't work for an
+ * async invocation. Mark us runnable again.
+ */
+ __set_current_state(TASK_RUNNING);
+ hlist_add_head(&req->hash_node, &ctx->futex_list);
+ } else {
+ iof->futexv_unqueued = 1;
+ if (woken != -1)
+ io_req_set_res(req, woken, 0);
+ }
+
+ io_ring_submit_unlock(ctx, issue_flags);
+ return IOU_ISSUE_SKIP_COMPLETE;
+}
+
+int io_futex_wait(struct io_kiocb *req, unsigned int issue_flags)
+{
+ struct io_futex *iof = io_kiocb_to_cmd(req, struct io_futex);
+ struct io_ring_ctx *ctx = req->ctx;
+ struct io_futex_data *ifd = NULL;
+ int ret;
+
+ if (!iof->futex_mask) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ io_ring_submit_lock(ctx, issue_flags);
+ ifd = io_cache_alloc(&ctx->futex_cache, GFP_NOWAIT);
+ if (!ifd) {
+ ret = -ENOMEM;
+ goto done_unlock;
+ }
+
+ req->flags |= REQ_F_ASYNC_DATA;
+ req->async_data = ifd;
+ ifd->q = futex_q_init;
+ ifd->q.bitset = iof->futex_mask;
+ ifd->q.wake = io_futex_wake_fn;
+ ifd->req = req;
+
+ ret = futex_wait_setup(iof->uaddr, iof->futex_val, iof->futex_flags,
+ &ifd->q, NULL, NULL);
+ if (!ret) {
+ hlist_add_head(&req->hash_node, &ctx->futex_list);
+ io_ring_submit_unlock(ctx, issue_flags);
+
+ return IOU_ISSUE_SKIP_COMPLETE;
+ }
+
+done_unlock:
+ io_ring_submit_unlock(ctx, issue_flags);
+done:
+ if (ret < 0)
+ req_set_fail(req);
+ io_req_set_res(req, ret, 0);
+ io_req_async_data_free(req);
+ return IOU_COMPLETE;
+}
+
+int io_futex_wake(struct io_kiocb *req, unsigned int issue_flags)
+{
+ struct io_futex *iof = io_kiocb_to_cmd(req, struct io_futex);
+ int ret;
+
+ /*
+ * Strict flags - ensure that waking 0 futexes yields a 0 result.
+ * See commit 43adf8449510 ("futex: FLAGS_STRICT") for details.
+ */
+ ret = futex_wake(iof->uaddr, FLAGS_STRICT | iof->futex_flags,
+ iof->futex_val, iof->futex_mask);
+ if (ret < 0)
+ req_set_fail(req);
+ io_req_set_res(req, ret, 0);
+ return IOU_COMPLETE;
+}
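
For reference, io_futex_prep() above defines how a futex-wait SQE is laid out: the futex address in ->addr, the expected value in ->addr2, the wake mask in ->addr3, the futex2 flags in ->fd, with len/buf_index/file_index/futex_flags required to be zero. A hedged userspace sketch of that raw layout follows (recent liburing releases also provide prep helpers for these opcodes, but filling the fields directly makes the mapping explicit); ring setup and error handling are omitted:

/* Userspace sketch of the SQE layout consumed by io_futex_prep() above. */
#include <string.h>
#include <stdint.h>
#include <linux/futex.h>
#include <linux/io_uring.h>

static void prep_futex_wait_sqe(struct io_uring_sqe *sqe, uint32_t *uaddr,
				uint64_t val, uint64_t mask)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_FUTEX_WAIT;
	sqe->fd = FUTEX2_SIZE_U32;		/* futex2 flags travel in ->fd */
	sqe->addr = (uint64_t)(uintptr_t)uaddr;	/* futex word */
	sqe->addr2 = val;			/* value to wait on */
	sqe->addr3 = mask;			/* e.g. FUTEX_BITSET_MATCH_ANY */
	/* len, buf_index, file_index and futex_flags must remain zero */
}
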
diff --git a/io_uring/futex.h b/io_uring/futex.h
new file mode 100644
index 000000000000..d789fcf715e3
--- /dev/null
+++ b/io_uring/futex.h
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "cancel.h"
+
+int io_futex_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
+int io_futexv_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
+int io_futex_wait(struct io_kiocb *req, unsigned int issue_flags);
+int io_futexv_wait(struct io_kiocb *req, unsigned int issue_flags);
+int io_futex_wake(struct io_kiocb *req, unsigned int issue_flags);
+
+#if defined(CONFIG_FUTEX)
+int io_futex_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
+ unsigned int issue_flags);
+bool io_futex_remove_all(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
+ bool cancel_all);
+bool io_futex_cache_init(struct io_ring_ctx *ctx);
+void io_futex_cache_free(struct io_ring_ctx *ctx);
+#else
+static inline int io_futex_cancel(struct io_ring_ctx *ctx,
+ struct io_cancel_data *cd,
+ unsigned int issue_flags)
+{
+ return 0;
+}
+static inline bool io_futex_remove_all(struct io_ring_ctx *ctx,
+ struct io_uring_task *tctx, bool cancel_all)
+{
+ return false;
+}
+static inline bool io_futex_cache_init(struct io_ring_ctx *ctx)
+{
+ return false;
+}
+static inline void io_futex_cache_free(struct io_ring_ctx *ctx)
+{
+}
+#endif
diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c
index 411bb2d1acd4..cd13d8aac3d2 100644
--- a/io_uring/io-wq.c
+++ b/io_uring/io-wq.c
@@ -13,8 +13,10 @@
#include <linux/slab.h>
#include <linux/rculist_nulls.h>
#include <linux/cpu.h>
+#include <linux/cpuset.h>
#include <linux/task_work.h>
#include <linux/audit.h>
+#include <linux/mmu_context.h>
#include <uapi/linux/io_uring.h>
#include "io-wq.h"
@@ -22,12 +24,12 @@
#include "io_uring.h"
#define WORKER_IDLE_TIMEOUT (5 * HZ)
+#define WORKER_INIT_LIMIT 3
enum {
- IO_WORKER_F_UP = 1, /* up and active */
- IO_WORKER_F_RUNNING = 2, /* account as running */
- IO_WORKER_F_FREE = 4, /* worker on free list */
- IO_WORKER_F_BOUND = 8, /* is doing bounded work */
+ IO_WORKER_F_UP = 0, /* up and active */
+ IO_WORKER_F_RUNNING = 1, /* account as running */
+ IO_WORKER_F_FREE = 2, /* worker on free list */
};
enum {
@@ -39,29 +41,29 @@ enum {
};
/*
- * One for each thread in a wqe pool
+ * One for each thread in a wq pool
*/
struct io_worker {
refcount_t ref;
- unsigned flags;
+ unsigned long flags;
struct hlist_nulls_node nulls_node;
struct list_head all_list;
struct task_struct *task;
- struct io_wqe *wqe;
+ struct io_wq *wq;
+ struct io_wq_acct *acct;
struct io_wq_work *cur_work;
- struct io_wq_work *next_work;
raw_spinlock_t lock;
struct completion ref_done;
unsigned long create_state;
struct callback_head create_work;
- int create_index;
+ int init_retries;
union {
struct rcu_head rcu;
- struct work_struct work;
+ struct delayed_work work;
};
};
@@ -73,11 +75,28 @@ struct io_worker {
#define IO_WQ_NR_HASH_BUCKETS (1u << IO_WQ_HASH_ORDER)
-struct io_wqe_acct {
+struct io_wq_acct {
+ /**
+ * Protects access to the worker lists.
+ */
+ raw_spinlock_t workers_lock;
+
unsigned nr_workers;
unsigned max_workers;
- int index;
atomic_t nr_running;
+
+ /**
+ * The list of free workers. Protected by #workers_lock
+ * (write) and RCU (read).
+ */
+ struct hlist_nulls_head free_list;
+
+ /**
+ * The list of all workers. Protected by #workers_lock
+ * (write) and RCU (read).
+ */
+ struct list_head all_list;
+
raw_spinlock_t lock;
struct io_wq_work_list work_list;
unsigned long flags;
@@ -90,34 +109,11 @@ enum {
};
/*
- * Per-node worker thread pool
- */
-struct io_wqe {
- raw_spinlock_t lock;
- struct io_wqe_acct acct[IO_WQ_ACCT_NR];
-
- int node;
-
- struct hlist_nulls_head free_list;
- struct list_head all_list;
-
- struct wait_queue_entry wait;
-
- struct io_wq *wq;
- struct io_wq_work *hash_tail[IO_WQ_NR_HASH_BUCKETS];
-
- cpumask_var_t cpu_mask;
-};
-
-/*
* Per io_wq state
*/
struct io_wq {
unsigned long state;
- free_work_fn *free_work;
- io_wq_work_fn *do_work;
-
struct io_wq_hash *hash;
atomic_t worker_refs;
@@ -127,7 +123,13 @@ struct io_wq {
struct task_struct *task;
- struct io_wqe *wqes[];
+ struct io_wq_acct acct[IO_WQ_ACCT_NR];
+
+ struct wait_queue_entry wait;
+
+ struct io_wq_work *hash_tail[IO_WQ_NR_HASH_BUCKETS];
+
+ cpumask_var_t cpu_mask;
};
static enum cpuhp_state io_wq_online;
@@ -140,14 +142,24 @@ struct io_cb_cancel_data {
bool cancel_all;
};
-static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index);
-static void io_wqe_dec_running(struct io_worker *worker);
-static bool io_acct_cancel_pending_work(struct io_wqe *wqe,
- struct io_wqe_acct *acct,
+static bool create_io_worker(struct io_wq *wq, struct io_wq_acct *acct);
+static void io_wq_dec_running(struct io_worker *worker);
+static bool io_acct_cancel_pending_work(struct io_wq *wq,
+ struct io_wq_acct *acct,
struct io_cb_cancel_data *match);
static void create_worker_cb(struct callback_head *cb);
static void io_wq_cancel_tw_create(struct io_wq *wq);
+static inline unsigned int __io_get_work_hash(unsigned int work_flags)
+{
+ return work_flags >> IO_WQ_HASH_SHIFT;
+}
+
+static inline unsigned int io_get_work_hash(struct io_wq_work *work)
+{
+ return __io_get_work_hash(atomic_read(&work->flags));
+}
+
static bool io_worker_get(struct io_worker *worker)
{
return refcount_inc_not_zero(&worker->ref);
@@ -159,20 +171,20 @@ static void io_worker_release(struct io_worker *worker)
complete(&worker->ref_done);
}
-static inline struct io_wqe_acct *io_get_acct(struct io_wqe *wqe, bool bound)
+static inline struct io_wq_acct *io_get_acct(struct io_wq *wq, bool bound)
{
- return &wqe->acct[bound ? IO_WQ_ACCT_BOUND : IO_WQ_ACCT_UNBOUND];
+ return &wq->acct[bound ? IO_WQ_ACCT_BOUND : IO_WQ_ACCT_UNBOUND];
}
-static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe,
- struct io_wq_work *work)
+static inline struct io_wq_acct *io_work_get_acct(struct io_wq *wq,
+ unsigned int work_flags)
{
- return io_get_acct(wqe, !(work->flags & IO_WQ_WORK_UNBOUND));
+ return io_get_acct(wq, !(work_flags & IO_WQ_WORK_UNBOUND));
}
-static inline struct io_wqe_acct *io_wqe_get_acct(struct io_worker *worker)
+static inline struct io_wq_acct *io_wq_get_acct(struct io_worker *worker)
{
- return io_get_acct(worker->wqe, worker->flags & IO_WORKER_F_BOUND);
+ return worker->acct;
}
static void io_worker_ref_put(struct io_wq *wq)
@@ -181,16 +193,25 @@ static void io_worker_ref_put(struct io_wq *wq)
complete(&wq->worker_done);
}
+bool io_wq_worker_stopped(void)
+{
+ struct io_worker *worker = current->worker_private;
+
+ if (WARN_ON_ONCE(!io_wq_current_is_worker()))
+ return true;
+
+ return test_bit(IO_WQ_BIT_EXIT, &worker->wq->state);
+}
+
static void io_worker_cancel_cb(struct io_worker *worker)
{
- struct io_wqe_acct *acct = io_wqe_get_acct(worker);
- struct io_wqe *wqe = worker->wqe;
- struct io_wq *wq = wqe->wq;
+ struct io_wq_acct *acct = io_wq_get_acct(worker);
+ struct io_wq *wq = worker->wq;
atomic_dec(&acct->nr_running);
- raw_spin_lock(&worker->wqe->lock);
+ raw_spin_lock(&acct->workers_lock);
acct->nr_workers--;
- raw_spin_unlock(&worker->wqe->lock);
+ raw_spin_unlock(&acct->workers_lock);
io_worker_ref_put(wq);
clear_bit_unlock(0, &worker->create_state);
io_worker_release(worker);
@@ -208,8 +229,8 @@ static bool io_task_worker_match(struct callback_head *cb, void *data)
static void io_worker_exit(struct io_worker *worker)
{
- struct io_wqe *wqe = worker->wqe;
- struct io_wq *wq = wqe->wq;
+ struct io_wq *wq = worker->wq;
+ struct io_wq_acct *acct = io_wq_get_acct(worker);
while (1) {
struct callback_head *cb = task_work_cancel_match(wq->task,
@@ -223,41 +244,50 @@ static void io_worker_exit(struct io_worker *worker)
io_worker_release(worker);
wait_for_completion(&worker->ref_done);
- raw_spin_lock(&wqe->lock);
- if (worker->flags & IO_WORKER_F_FREE)
+ raw_spin_lock(&acct->workers_lock);
+ if (test_bit(IO_WORKER_F_FREE, &worker->flags))
hlist_nulls_del_rcu(&worker->nulls_node);
list_del_rcu(&worker->all_list);
- raw_spin_unlock(&wqe->lock);
- io_wqe_dec_running(worker);
- worker->flags = 0;
- preempt_disable();
- current->flags &= ~PF_IO_WORKER;
- preempt_enable();
+ raw_spin_unlock(&acct->workers_lock);
+ io_wq_dec_running(worker);
+ /*
+ * this worker is a goner, clear ->worker_private to avoid any
+ * inc/dec running calls that could happen as part of exit from
+ * touching 'worker'.
+ */
+ current->worker_private = NULL;
kfree_rcu(worker, rcu);
- io_worker_ref_put(wqe->wq);
+ io_worker_ref_put(wq);
do_exit(0);
}
-static inline bool io_acct_run_queue(struct io_wqe_acct *acct)
+static inline bool __io_acct_run_queue(struct io_wq_acct *acct)
{
- bool ret = false;
+ return !test_bit(IO_ACCT_STALLED_BIT, &acct->flags) &&
+ !wq_list_empty(&acct->work_list);
+}
+/*
+ * If there's work to do, returns true with acct->lock acquired. If not,
+ * returns false with no lock held.
+ */
+static inline bool io_acct_run_queue(struct io_wq_acct *acct)
+ __acquires(&acct->lock)
+{
raw_spin_lock(&acct->lock);
- if (!wq_list_empty(&acct->work_list) &&
- !test_bit(IO_ACCT_STALLED_BIT, &acct->flags))
- ret = true;
- raw_spin_unlock(&acct->lock);
+ if (__io_acct_run_queue(acct))
+ return true;
- return ret;
+ raw_spin_unlock(&acct->lock);
+ return false;
}
/*
* Check head of free list for an available worker. If one isn't available,
* caller must create one.
*/
-static bool io_wqe_activate_free_worker(struct io_wqe *wqe,
- struct io_wqe_acct *acct)
+static bool io_acct_activate_free_worker(struct io_wq_acct *acct)
__must_hold(RCU)
{
struct hlist_nulls_node *n;
@@ -268,18 +298,17 @@ static bool io_wqe_activate_free_worker(struct io_wqe *wqe,
* activate. If a given worker is on the free_list but in the process
* of exiting, keep trying.
*/
- hlist_nulls_for_each_entry_rcu(worker, n, &wqe->free_list, nulls_node) {
+ hlist_nulls_for_each_entry_rcu(worker, n, &acct->free_list, nulls_node) {
if (!io_worker_get(worker))
continue;
- if (io_wqe_get_acct(worker) != acct) {
- io_worker_release(worker);
- continue;
- }
- if (wake_up_process(worker->task)) {
- io_worker_release(worker);
- return true;
- }
+ /*
+ * If the worker is already running, it's either already
+ * starting work or finishing work. In either case, if it does
+ * go to sleep, we'll kick off a new task for this work anyway.
+ */
+ wake_up_process(worker->task);
io_worker_release(worker);
+ return true;
}
return false;
@@ -289,7 +318,7 @@ static bool io_wqe_activate_free_worker(struct io_wqe *wqe,
* We need a worker. If we find a free one, we're good. If not, and we're
* below the max number of workers, create one.
*/
-static bool io_wqe_create_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
+static bool io_wq_create_worker(struct io_wq *wq, struct io_wq_acct *acct)
{
/*
* Most likely an attempt to queue unbounded work on an io_wq that
@@ -298,21 +327,21 @@ static bool io_wqe_create_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
if (unlikely(!acct->max_workers))
pr_warn_once("io-wq is not configured for unbound workers");
- raw_spin_lock(&wqe->lock);
+ raw_spin_lock(&acct->workers_lock);
if (acct->nr_workers >= acct->max_workers) {
- raw_spin_unlock(&wqe->lock);
+ raw_spin_unlock(&acct->workers_lock);
return true;
}
acct->nr_workers++;
- raw_spin_unlock(&wqe->lock);
+ raw_spin_unlock(&acct->workers_lock);
atomic_inc(&acct->nr_running);
- atomic_inc(&wqe->wq->worker_refs);
- return create_io_worker(wqe->wq, wqe, acct->index);
+ atomic_inc(&wq->worker_refs);
+ return create_io_worker(wq, acct);
}
-static void io_wqe_inc_running(struct io_worker *worker)
+static void io_wq_inc_running(struct io_worker *worker)
{
- struct io_wqe_acct *acct = io_wqe_get_acct(worker);
+ struct io_wq_acct *acct = io_wq_get_acct(worker);
atomic_inc(&acct->nr_running);
}
@@ -321,23 +350,31 @@ static void create_worker_cb(struct callback_head *cb)
{
struct io_worker *worker;
struct io_wq *wq;
- struct io_wqe *wqe;
- struct io_wqe_acct *acct;
- bool do_create = false;
+
+ struct io_wq_acct *acct;
+ bool activated_free_worker, do_create = false;
worker = container_of(cb, struct io_worker, create_work);
- wqe = worker->wqe;
- wq = wqe->wq;
- acct = &wqe->acct[worker->create_index];
- raw_spin_lock(&wqe->lock);
+ wq = worker->wq;
+ acct = worker->acct;
+
+ rcu_read_lock();
+ activated_free_worker = io_acct_activate_free_worker(acct);
+ rcu_read_unlock();
+ if (activated_free_worker)
+ goto no_need_create;
+
+ raw_spin_lock(&acct->workers_lock);
+
if (acct->nr_workers < acct->max_workers) {
acct->nr_workers++;
do_create = true;
}
- raw_spin_unlock(&wqe->lock);
+ raw_spin_unlock(&acct->workers_lock);
if (do_create) {
- create_io_worker(wq, wqe, worker->create_index);
+ create_io_worker(wq, acct);
} else {
+no_need_create:
atomic_dec(&acct->nr_running);
io_worker_ref_put(wq);
}
@@ -346,11 +383,10 @@ static void create_worker_cb(struct callback_head *cb)
}
static bool io_queue_worker_create(struct io_worker *worker,
- struct io_wqe_acct *acct,
+ struct io_wq_acct *acct,
task_work_func_t func)
{
- struct io_wqe *wqe = worker->wqe;
- struct io_wq *wq = wqe->wq;
+ struct io_wq *wq = worker->wq;
/* raced with exit, just ignore create call */
if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
@@ -369,7 +405,6 @@ static bool io_queue_worker_create(struct io_worker *worker,
atomic_inc(&wq->worker_refs);
init_task_work(&worker->create_work, func);
- worker->create_index = acct->index;
if (!task_work_add(wq->task, &worker->create_work, TWA_SIGNAL)) {
/*
* EXIT may have been set after checking it above, check after
@@ -392,21 +427,52 @@ fail:
return false;
}
-static void io_wqe_dec_running(struct io_worker *worker)
+/* Defer if current and next work are both hashed to the same chain */
+static bool io_wq_hash_defer(struct io_wq_work *work, struct io_wq_acct *acct)
{
- struct io_wqe_acct *acct = io_wqe_get_acct(worker);
- struct io_wqe *wqe = worker->wqe;
+ unsigned int hash, work_flags;
+ struct io_wq_work *next;
- if (!(worker->flags & IO_WORKER_F_UP))
+ lockdep_assert_held(&acct->lock);
+
+ work_flags = atomic_read(&work->flags);
+ if (!__io_wq_is_hashed(work_flags))
+ return false;
+
+ /* should not happen, io_acct_run_queue() said we had work */
+ if (wq_list_empty(&acct->work_list))
+ return true;
+
+ hash = __io_get_work_hash(work_flags);
+ next = container_of(acct->work_list.first, struct io_wq_work, list);
+ work_flags = atomic_read(&next->flags);
+ if (!__io_wq_is_hashed(work_flags))
+ return false;
+ return hash == __io_get_work_hash(work_flags);
+}
+
+static void io_wq_dec_running(struct io_worker *worker)
+{
+ struct io_wq_acct *acct = io_wq_get_acct(worker);
+ struct io_wq *wq = worker->wq;
+
+ if (!test_bit(IO_WORKER_F_UP, &worker->flags))
return;
if (!atomic_dec_and_test(&acct->nr_running))
return;
+ if (!worker->cur_work)
+ return;
if (!io_acct_run_queue(acct))
return;
+ if (io_wq_hash_defer(worker->cur_work, acct)) {
+ raw_spin_unlock(&acct->lock);
+ return;
+ }
+ raw_spin_unlock(&acct->lock);
atomic_inc(&acct->nr_running);
- atomic_inc(&wqe->wq->worker_refs);
+ atomic_inc(&wq->worker_refs);
io_queue_worker_create(worker, acct, create_worker_cb);
}
@@ -414,48 +480,38 @@ static void io_wqe_dec_running(struct io_worker *worker)
* Worker will start processing some work. Move it to the busy list, if
* it's currently on the freelist
*/
-static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker)
+static void __io_worker_busy(struct io_wq_acct *acct, struct io_worker *worker)
{
- if (worker->flags & IO_WORKER_F_FREE) {
- worker->flags &= ~IO_WORKER_F_FREE;
- raw_spin_lock(&wqe->lock);
+ if (test_bit(IO_WORKER_F_FREE, &worker->flags)) {
+ clear_bit(IO_WORKER_F_FREE, &worker->flags);
+ raw_spin_lock(&acct->workers_lock);
hlist_nulls_del_init_rcu(&worker->nulls_node);
- raw_spin_unlock(&wqe->lock);
+ raw_spin_unlock(&acct->workers_lock);
}
}
/*
- * No work, worker going to sleep. Move to freelist, and unuse mm if we
- * have one attached. Dropping the mm may potentially sleep, so we drop
- * the lock in that case and return success. Since the caller has to
- * retry the loop in that case (we changed task state), we don't regrab
- * the lock if we return success.
+ * No work, worker going to sleep. Move to freelist.
*/
-static void __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
- __must_hold(wqe->lock)
+static void __io_worker_idle(struct io_wq_acct *acct, struct io_worker *worker)
+ __must_hold(acct->workers_lock)
{
- if (!(worker->flags & IO_WORKER_F_FREE)) {
- worker->flags |= IO_WORKER_F_FREE;
- hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
+ if (!test_bit(IO_WORKER_F_FREE, &worker->flags)) {
+ set_bit(IO_WORKER_F_FREE, &worker->flags);
+ hlist_nulls_add_head_rcu(&worker->nulls_node, &acct->free_list);
}
}
-static inline unsigned int io_get_work_hash(struct io_wq_work *work)
-{
- return work->flags >> IO_WQ_HASH_SHIFT;
-}
-
-static bool io_wait_on_hash(struct io_wqe *wqe, unsigned int hash)
+static bool io_wait_on_hash(struct io_wq *wq, unsigned int hash)
{
- struct io_wq *wq = wqe->wq;
bool ret = false;
spin_lock_irq(&wq->hash->wait.lock);
- if (list_empty(&wqe->wait.entry)) {
- __add_wait_queue(&wq->hash->wait, &wqe->wait);
+ if (list_empty(&wq->wait.entry)) {
+ __add_wait_queue(&wq->hash->wait, &wq->wait);
if (!test_bit(hash, &wq->hash->map)) {
__set_current_state(TASK_RUNNING);
- list_del_init(&wqe->wait.entry);
+ list_del_init(&wq->wait.entry);
ret = true;
}
}
@@ -463,33 +519,34 @@ static bool io_wait_on_hash(struct io_wqe *wqe, unsigned int hash)
return ret;
}
-static struct io_wq_work *io_get_next_work(struct io_wqe_acct *acct,
- struct io_worker *worker)
+static struct io_wq_work *io_get_next_work(struct io_wq_acct *acct,
+ struct io_wq *wq)
__must_hold(acct->lock)
{
struct io_wq_work_node *node, *prev;
struct io_wq_work *work, *tail;
unsigned int stall_hash = -1U;
- struct io_wqe *wqe = worker->wqe;
wq_list_for_each(node, prev, &acct->work_list) {
+ unsigned int work_flags;
unsigned int hash;
work = container_of(node, struct io_wq_work, list);
/* not hashed, can run anytime */
- if (!io_wq_is_hashed(work)) {
+ work_flags = atomic_read(&work->flags);
+ if (!__io_wq_is_hashed(work_flags)) {
wq_list_del(&acct->work_list, node, prev);
return work;
}
- hash = io_get_work_hash(work);
+ hash = __io_get_work_hash(work_flags);
/* all items with this hash lie in [work, tail] */
- tail = wqe->hash_tail[hash];
+ tail = wq->hash_tail[hash];
/* hashed, can run if not already running */
- if (!test_and_set_bit(hash, &wqe->wq->hash->map)) {
- wqe->hash_tail[hash] = NULL;
+ if (!test_and_set_bit(hash, &wq->hash->map)) {
+ wq->hash_tail[hash] = NULL;
wq_list_cut(&acct->work_list, &tail->list, prev);
return work;
}
@@ -508,12 +565,12 @@ static struct io_wq_work *io_get_next_work(struct io_wqe_acct *acct,
*/
set_bit(IO_ACCT_STALLED_BIT, &acct->flags);
raw_spin_unlock(&acct->lock);
- unstalled = io_wait_on_hash(wqe, stall_hash);
+ unstalled = io_wait_on_hash(wq, stall_hash);
raw_spin_lock(&acct->lock);
if (unstalled) {
clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
- if (wq_has_sleeper(&wqe->wq->hash->wait))
- wake_up(&wqe->wq->hash->wait);
+ if (wq_has_sleeper(&wq->hash->wait))
+ wake_up(&wq->hash->wait);
}
}
@@ -530,17 +587,17 @@ static void io_assign_current_work(struct io_worker *worker,
raw_spin_lock(&worker->lock);
worker->cur_work = work;
- worker->next_work = NULL;
raw_spin_unlock(&worker->lock);
}
-static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work);
-
-static void io_worker_handle_work(struct io_worker *worker)
+/*
+ * Called with acct->lock held, drops it before returning
+ */
+static void io_worker_handle_work(struct io_wq_acct *acct,
+ struct io_worker *worker)
+ __releases(&acct->lock)
{
- struct io_wqe_acct *acct = io_wqe_get_acct(worker);
- struct io_wqe *wqe = worker->wqe;
- struct io_wq *wq = wqe->wq;
+ struct io_wq *wq = worker->wq;
bool do_kill = test_bit(IO_WQ_BIT_EXIT, &wq->state);
do {
@@ -553,12 +610,8 @@ static void io_worker_handle_work(struct io_worker *worker)
* can't make progress, any work completion or insertion will
* clear the stalled flag.
*/
- raw_spin_lock(&acct->lock);
- work = io_get_next_work(acct, worker);
- raw_spin_unlock(&acct->lock);
+ work = io_get_next_work(acct, wq);
if (work) {
- __io_worker_busy(wqe, worker);
-
/*
* Make sure cancelation can find this, even before
* it becomes the active work. That avoids a window
@@ -567,27 +620,37 @@ static void io_worker_handle_work(struct io_worker *worker)
* current work item for this worker.
*/
raw_spin_lock(&worker->lock);
- worker->next_work = work;
+ worker->cur_work = work;
raw_spin_unlock(&worker->lock);
- } else {
- break;
}
+
+ raw_spin_unlock(&acct->lock);
+
+ if (!work)
+ break;
+
+ __io_worker_busy(acct, worker);
+
io_assign_current_work(worker, work);
__set_current_state(TASK_RUNNING);
/* handle a whole dependent link */
do {
struct io_wq_work *next_hashed, *linked;
- unsigned int hash = io_get_work_hash(work);
+ unsigned int work_flags = atomic_read(&work->flags);
+ unsigned int hash = __io_wq_is_hashed(work_flags)
+ ? __io_get_work_hash(work_flags)
+ : -1U;
next_hashed = wq_next_work(work);
- if (unlikely(do_kill) && (work->flags & IO_WQ_WORK_UNBOUND))
- work->flags |= IO_WQ_WORK_CANCEL;
- wq->do_work(work);
+ if (do_kill &&
+ (work_flags & IO_WQ_WORK_UNBOUND))
+ atomic_or(IO_WQ_WORK_CANCEL, &work->flags);
+ io_wq_submit_work(work);
io_assign_current_work(worker, NULL);
- linked = wq->free_work(work);
+ linked = io_wq_free_work(work);
work = next_hashed;
if (!work && linked && !io_wq_is_hashed(linked)) {
work = linked;
@@ -595,7 +658,7 @@ static void io_worker_handle_work(struct io_worker *worker)
}
io_assign_current_work(worker, work);
if (linked)
- io_wqe_enqueue(wqe, linked);
+ io_wq_enqueue(wq, linked);
if (hash != -1U && !next_hashed) {
/* serialize hash clear with wake_up() */
@@ -607,19 +670,23 @@ static void io_worker_handle_work(struct io_worker *worker)
wake_up(&wq->hash->wait);
}
} while (work);
+
+ if (!__io_acct_run_queue(acct))
+ break;
+ raw_spin_lock(&acct->lock);
} while (1);
}
-static int io_wqe_worker(void *data)
+static int io_wq_worker(void *data)
{
struct io_worker *worker = data;
- struct io_wqe_acct *acct = io_wqe_get_acct(worker);
- struct io_wqe *wqe = worker->wqe;
- struct io_wq *wq = wqe->wq;
- bool last_timeout = false;
- char buf[TASK_COMM_LEN];
+ struct io_wq_acct *acct = io_wq_get_acct(worker);
+ struct io_wq *wq = worker->wq;
+ bool exit_mask = false, last_timeout = false;
+ char buf[TASK_COMM_LEN] = {};
- worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);
+ set_mask_bits(&worker->flags, 0,
+ BIT(IO_WORKER_F_UP) | BIT(IO_WORKER_F_RUNNING));
snprintf(buf, sizeof(buf), "iou-wrk-%d", wq->task->pid);
set_task_comm(current, buf);
@@ -628,20 +695,28 @@ static int io_wqe_worker(void *data)
long ret;
set_current_state(TASK_INTERRUPTIBLE);
+
+ /*
+ * If we have work to do, io_acct_run_queue() returns with
+ * the acct->lock held. If not, it will drop it.
+ */
while (io_acct_run_queue(acct))
- io_worker_handle_work(worker);
+ io_worker_handle_work(acct, worker);
- raw_spin_lock(&wqe->lock);
- /* timed out, exit unless we're the last worker */
- if (last_timeout && acct->nr_workers > 1) {
+ raw_spin_lock(&acct->workers_lock);
+ /*
+ * Last sleep timed out. Exit if we're not the last worker,
+ * or if someone modified our affinity.
+ */
+ if (last_timeout && (exit_mask || acct->nr_workers > 1)) {
acct->nr_workers--;
- raw_spin_unlock(&wqe->lock);
+ raw_spin_unlock(&acct->workers_lock);
__set_current_state(TASK_RUNNING);
break;
}
last_timeout = false;
- __io_worker_idle(wqe, worker);
- raw_spin_unlock(&wqe->lock);
+ __io_worker_idle(acct, worker);
+ raw_spin_unlock(&acct->workers_lock);
if (io_run_task_work())
continue;
ret = schedule_timeout(WORKER_IDLE_TIMEOUT);
@@ -652,11 +727,15 @@ static int io_wqe_worker(void *data)
continue;
break;
}
- last_timeout = !ret;
+ if (!ret) {
+ last_timeout = true;
+ exit_mask = !cpumask_test_cpu(raw_smp_processor_id(),
+ wq->cpu_mask);
+ }
}
- if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
- io_worker_handle_work(worker);
+ if (test_bit(IO_WQ_BIT_EXIT, &wq->state) && io_acct_run_queue(acct))
+ io_worker_handle_work(acct, worker);
io_worker_exit(worker);
return 0;
@@ -671,12 +750,12 @@ void io_wq_worker_running(struct task_struct *tsk)
if (!worker)
return;
- if (!(worker->flags & IO_WORKER_F_UP))
+ if (!test_bit(IO_WORKER_F_UP, &worker->flags))
return;
- if (worker->flags & IO_WORKER_F_RUNNING)
+ if (test_bit(IO_WORKER_F_RUNNING, &worker->flags))
return;
- worker->flags |= IO_WORKER_F_RUNNING;
- io_wqe_inc_running(worker);
+ set_bit(IO_WORKER_F_RUNNING, &worker->flags);
+ io_wq_inc_running(worker);
}
/*
@@ -689,28 +768,27 @@ void io_wq_worker_sleeping(struct task_struct *tsk)
if (!worker)
return;
- if (!(worker->flags & IO_WORKER_F_UP))
+ if (!test_bit(IO_WORKER_F_UP, &worker->flags))
return;
- if (!(worker->flags & IO_WORKER_F_RUNNING))
+ if (!test_bit(IO_WORKER_F_RUNNING, &worker->flags))
return;
- worker->flags &= ~IO_WORKER_F_RUNNING;
- io_wqe_dec_running(worker);
+ clear_bit(IO_WORKER_F_RUNNING, &worker->flags);
+ io_wq_dec_running(worker);
}
-static void io_init_new_worker(struct io_wqe *wqe, struct io_worker *worker,
+static void io_init_new_worker(struct io_wq *wq, struct io_wq_acct *acct, struct io_worker *worker,
struct task_struct *tsk)
{
tsk->worker_private = worker;
worker->task = tsk;
- set_cpus_allowed_ptr(tsk, wqe->cpu_mask);
- tsk->flags |= PF_NO_SETAFFINITY;
-
- raw_spin_lock(&wqe->lock);
- hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
- list_add_tail_rcu(&worker->all_list, &wqe->all_list);
- worker->flags |= IO_WORKER_F_FREE;
- raw_spin_unlock(&wqe->lock);
+ set_cpus_allowed_ptr(tsk, wq->cpu_mask);
+
+ raw_spin_lock(&acct->workers_lock);
+ hlist_nulls_add_head_rcu(&worker->nulls_node, &acct->free_list);
+ list_add_tail_rcu(&worker->all_list, &acct->all_list);
+ set_bit(IO_WORKER_F_FREE, &worker->flags);
+ raw_spin_unlock(&acct->workers_lock);
wake_up_new_task(tsk);
}
@@ -719,7 +797,7 @@ static bool io_wq_work_match_all(struct io_wq_work *work, void *data)
return true;
}
-static inline bool io_should_retry_thread(long err)
+static inline bool io_should_retry_thread(struct io_worker *worker, long err)
{
/*
* Prevent perpetual task_work retry, if the task (or its group) is
@@ -728,8 +806,11 @@ static inline bool io_should_retry_thread(long err)
if (fatal_signal_pending(current))
return false;
+ worker->init_retries++;
switch (err) {
case -EAGAIN:
+ return worker->init_retries <= WORKER_INIT_LIMIT;
+ /* Analogous to a fork() syscall, always retry on a restartable error */
case -ERESTARTSYS:
case -ERESTARTNOINTR:
case -ERESTARTNOHAND:
@@ -739,25 +820,37 @@ static inline bool io_should_retry_thread(long err)
}
}
+static void queue_create_worker_retry(struct io_worker *worker)
+{
+ /*
+ * We only bother retrying because there's a chance that the
+ * failure to create a worker is due to some temporary condition
+ * in the forking task (e.g. outstanding signal); give the task
+ * some time to clear that condition.
+ */
+ schedule_delayed_work(&worker->work,
+ msecs_to_jiffies(worker->init_retries * 5));
+}
+
static void create_worker_cont(struct callback_head *cb)
{
struct io_worker *worker;
struct task_struct *tsk;
- struct io_wqe *wqe;
+ struct io_wq *wq;
+ struct io_wq_acct *acct;
worker = container_of(cb, struct io_worker, create_work);
clear_bit_unlock(0, &worker->create_state);
- wqe = worker->wqe;
- tsk = create_io_thread(io_wqe_worker, worker, wqe->node);
+ wq = worker->wq;
+ acct = io_wq_get_acct(worker);
+ tsk = create_io_thread(io_wq_worker, worker, NUMA_NO_NODE);
if (!IS_ERR(tsk)) {
- io_init_new_worker(wqe, worker, tsk);
+ io_init_new_worker(wq, acct, worker, tsk);
io_worker_release(worker);
return;
- } else if (!io_should_retry_thread(PTR_ERR(tsk))) {
- struct io_wqe_acct *acct = io_wqe_get_acct(worker);
-
+ } else if (!io_should_retry_thread(worker, PTR_ERR(tsk))) {
atomic_dec(&acct->nr_running);
- raw_spin_lock(&wqe->lock);
+ raw_spin_lock(&acct->workers_lock);
acct->nr_workers--;
if (!acct->nr_workers) {
struct io_cb_cancel_data match = {
@@ -765,67 +858,65 @@ static void create_worker_cont(struct callback_head *cb)
.cancel_all = true,
};
- raw_spin_unlock(&wqe->lock);
- while (io_acct_cancel_pending_work(wqe, acct, &match))
+ raw_spin_unlock(&acct->workers_lock);
+ while (io_acct_cancel_pending_work(wq, acct, &match))
;
} else {
- raw_spin_unlock(&wqe->lock);
+ raw_spin_unlock(&acct->workers_lock);
}
- io_worker_ref_put(wqe->wq);
+ io_worker_ref_put(wq);
kfree(worker);
return;
}
/* re-create attempts grab a new worker ref, drop the existing one */
io_worker_release(worker);
- schedule_work(&worker->work);
+ queue_create_worker_retry(worker);
}
static void io_workqueue_create(struct work_struct *work)
{
- struct io_worker *worker = container_of(work, struct io_worker, work);
- struct io_wqe_acct *acct = io_wqe_get_acct(worker);
+ struct io_worker *worker = container_of(work, struct io_worker,
+ work.work);
+ struct io_wq_acct *acct = io_wq_get_acct(worker);
if (!io_queue_worker_create(worker, acct, create_worker_cont))
kfree(worker);
}
-static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
+static bool create_io_worker(struct io_wq *wq, struct io_wq_acct *acct)
{
- struct io_wqe_acct *acct = &wqe->acct[index];
struct io_worker *worker;
struct task_struct *tsk;
__set_current_state(TASK_RUNNING);
- worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node);
+ worker = kzalloc(sizeof(*worker), GFP_KERNEL);
if (!worker) {
fail:
atomic_dec(&acct->nr_running);
- raw_spin_lock(&wqe->lock);
+ raw_spin_lock(&acct->workers_lock);
acct->nr_workers--;
- raw_spin_unlock(&wqe->lock);
+ raw_spin_unlock(&acct->workers_lock);
io_worker_ref_put(wq);
return false;
}
refcount_set(&worker->ref, 1);
- worker->wqe = wqe;
+ worker->wq = wq;
+ worker->acct = acct;
raw_spin_lock_init(&worker->lock);
init_completion(&worker->ref_done);
- if (index == IO_WQ_ACCT_BOUND)
- worker->flags |= IO_WORKER_F_BOUND;
-
- tsk = create_io_thread(io_wqe_worker, worker, wqe->node);
+ tsk = create_io_thread(io_wq_worker, worker, NUMA_NO_NODE);
if (!IS_ERR(tsk)) {
- io_init_new_worker(wqe, worker, tsk);
- } else if (!io_should_retry_thread(PTR_ERR(tsk))) {
+ io_init_new_worker(wq, acct, worker, tsk);
+ } else if (!io_should_retry_thread(worker, PTR_ERR(tsk))) {
kfree(worker);
goto fail;
} else {
- INIT_WORK(&worker->work, io_workqueue_create);
- schedule_work(&worker->work);
+ INIT_DELAYED_WORK(&worker->work, io_workqueue_create);
+ queue_create_worker_retry(worker);
}
return true;
@@ -835,14 +926,14 @@ fail:
* Iterate the passed in list and call the specific function for each
* worker that isn't exiting
*/
-static bool io_wq_for_each_worker(struct io_wqe *wqe,
- bool (*func)(struct io_worker *, void *),
- void *data)
+static bool io_acct_for_each_worker(struct io_wq_acct *acct,
+ bool (*func)(struct io_worker *, void *),
+ void *data)
{
struct io_worker *worker;
bool ret = false;
- list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
+ list_for_each_entry_rcu(worker, &acct->all_list, all_list) {
if (io_worker_get(worker)) {
/* no task if node is/was offline */
if (worker->task)
@@ -856,6 +947,18 @@ static bool io_wq_for_each_worker(struct io_wqe *wqe,
return ret;
}
+static bool io_wq_for_each_worker(struct io_wq *wq,
+ bool (*func)(struct io_worker *, void *),
+ void *data)
+{
+ for (int i = 0; i < IO_WQ_ACCT_NR; i++) {
+ if (!io_acct_for_each_worker(&wq->acct[i], func, data))
+ return false;
+ }
+
+ return true;
+}
+
static bool io_wq_worker_wake(struct io_worker *worker, void *data)
{
__set_notify_signal(worker->task);
@@ -863,32 +966,30 @@ static bool io_wq_worker_wake(struct io_worker *worker, void *data)
return false;
}
-static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe)
+static void io_run_cancel(struct io_wq_work *work, struct io_wq *wq)
{
- struct io_wq *wq = wqe->wq;
-
do {
- work->flags |= IO_WQ_WORK_CANCEL;
- wq->do_work(work);
- work = wq->free_work(work);
+ atomic_or(IO_WQ_WORK_CANCEL, &work->flags);
+ io_wq_submit_work(work);
+ work = io_wq_free_work(work);
} while (work);
}
-static void io_wqe_insert_work(struct io_wqe *wqe, struct io_wq_work *work)
+static void io_wq_insert_work(struct io_wq *wq, struct io_wq_acct *acct,
+ struct io_wq_work *work, unsigned int work_flags)
{
- struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
unsigned int hash;
struct io_wq_work *tail;
- if (!io_wq_is_hashed(work)) {
+ if (!__io_wq_is_hashed(work_flags)) {
append:
wq_list_add_tail(&work->list, &acct->work_list);
return;
}
- hash = io_get_work_hash(work);
- tail = wqe->hash_tail[hash];
- wqe->hash_tail[hash] = work;
+ hash = __io_get_work_hash(work_flags);
+ tail = wq->hash_tail[hash];
+ wq->hash_tail[hash] = work;
if (!tail)
goto append;
@@ -900,66 +1001,56 @@ static bool io_wq_work_match_item(struct io_wq_work *work, void *data)
return work == data;
}
-static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
+void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
{
- struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
- struct io_cb_cancel_data match;
- unsigned work_flags = work->flags;
+ unsigned int work_flags = atomic_read(&work->flags);
+ struct io_wq_acct *acct = io_work_get_acct(wq, work_flags);
+ struct io_cb_cancel_data match = {
+ .fn = io_wq_work_match_item,
+ .data = work,
+ .cancel_all = false,
+ };
bool do_create;
/*
* If io-wq is exiting for this task, or if the request has explicitly
* been marked as one that should not get executed, cancel it here.
*/
- if (test_bit(IO_WQ_BIT_EXIT, &wqe->wq->state) ||
- (work->flags & IO_WQ_WORK_CANCEL)) {
- io_run_cancel(work, wqe);
+ if (test_bit(IO_WQ_BIT_EXIT, &wq->state) ||
+ (work_flags & IO_WQ_WORK_CANCEL)) {
+ io_run_cancel(work, wq);
return;
}
raw_spin_lock(&acct->lock);
- io_wqe_insert_work(wqe, work);
+ io_wq_insert_work(wq, acct, work, work_flags);
clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
raw_spin_unlock(&acct->lock);
- raw_spin_lock(&wqe->lock);
rcu_read_lock();
- do_create = !io_wqe_activate_free_worker(wqe, acct);
+ do_create = !io_acct_activate_free_worker(acct);
rcu_read_unlock();
- raw_spin_unlock(&wqe->lock);
-
if (do_create && ((work_flags & IO_WQ_WORK_CONCURRENT) ||
!atomic_read(&acct->nr_running))) {
bool did_create;
- did_create = io_wqe_create_worker(wqe, acct);
+ did_create = io_wq_create_worker(wq, acct);
if (likely(did_create))
return;
- raw_spin_lock(&wqe->lock);
+ raw_spin_lock(&acct->workers_lock);
if (acct->nr_workers) {
- raw_spin_unlock(&wqe->lock);
+ raw_spin_unlock(&acct->workers_lock);
return;
}
- raw_spin_unlock(&wqe->lock);
+ raw_spin_unlock(&acct->workers_lock);
/* fatal condition, failed to create the first worker */
- match.fn = io_wq_work_match_item,
- match.data = work,
- match.cancel_all = false,
-
- io_acct_cancel_pending_work(wqe, acct, &match);
+ io_acct_cancel_pending_work(wq, acct, &match);
}
}
-void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
-{
- struct io_wqe *wqe = wq->wqes[numa_node_id()];
-
- io_wqe_enqueue(wqe, work);
-}
-
/*
* Work items that hash to the same value will not be done in parallel.
* Used to limit concurrent writes, generally hashed by inode.
@@ -969,7 +1060,7 @@ void io_wq_hash_work(struct io_wq_work *work, void *val)
unsigned int bit;
bit = hash_ptr(val, IO_WQ_HASH_ORDER);
- work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
+ atomic_or(IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT), &work->flags);
}
static bool __io_wq_worker_cancel(struct io_worker *worker,
@@ -977,7 +1068,7 @@ static bool __io_wq_worker_cancel(struct io_worker *worker,
struct io_wq_work *work)
{
if (work && match->fn(work, match->data)) {
- work->flags |= IO_WQ_WORK_CANCEL;
+ atomic_or(IO_WQ_WORK_CANCEL, &work->flags);
__set_notify_signal(worker->task);
return true;
}
@@ -994,35 +1085,34 @@ static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
* may dereference the passed in work.
*/
raw_spin_lock(&worker->lock);
- if (__io_wq_worker_cancel(worker, match, worker->cur_work) ||
- __io_wq_worker_cancel(worker, match, worker->next_work))
+ if (__io_wq_worker_cancel(worker, match, worker->cur_work))
match->nr_running++;
raw_spin_unlock(&worker->lock);
return match->nr_running && !match->cancel_all;
}
-static inline void io_wqe_remove_pending(struct io_wqe *wqe,
+static inline void io_wq_remove_pending(struct io_wq *wq,
+ struct io_wq_acct *acct,
struct io_wq_work *work,
struct io_wq_work_node *prev)
{
- struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
unsigned int hash = io_get_work_hash(work);
struct io_wq_work *prev_work = NULL;
- if (io_wq_is_hashed(work) && work == wqe->hash_tail[hash]) {
+ if (io_wq_is_hashed(work) && work == wq->hash_tail[hash]) {
if (prev)
prev_work = container_of(prev, struct io_wq_work, list);
if (prev_work && io_get_work_hash(prev_work) == hash)
- wqe->hash_tail[hash] = prev_work;
+ wq->hash_tail[hash] = prev_work;
else
- wqe->hash_tail[hash] = NULL;
+ wq->hash_tail[hash] = NULL;
}
wq_list_del(&acct->work_list, &work->list, prev);
}
-static bool io_acct_cancel_pending_work(struct io_wqe *wqe,
- struct io_wqe_acct *acct,
+static bool io_acct_cancel_pending_work(struct io_wq *wq,
+ struct io_wq_acct *acct,
struct io_cb_cancel_data *match)
{
struct io_wq_work_node *node, *prev;
@@ -1033,9 +1123,9 @@ static bool io_acct_cancel_pending_work(struct io_wqe *wqe,
work = container_of(node, struct io_wq_work, list);
if (!match->fn(work, match->data))
continue;
- io_wqe_remove_pending(wqe, work, prev);
+ io_wq_remove_pending(wq, acct, work, prev);
raw_spin_unlock(&acct->lock);
- io_run_cancel(work, wqe);
+ io_run_cancel(work, wq);
match->nr_pending++;
/* not safe to continue after unlock */
return true;
@@ -1045,15 +1135,15 @@ static bool io_acct_cancel_pending_work(struct io_wqe *wqe,
return false;
}
-static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
- struct io_cb_cancel_data *match)
+static void io_wq_cancel_pending_work(struct io_wq *wq,
+ struct io_cb_cancel_data *match)
{
int i;
retry:
for (i = 0; i < IO_WQ_ACCT_NR; i++) {
- struct io_wqe_acct *acct = io_get_acct(wqe, i == 0);
+ struct io_wq_acct *acct = io_get_acct(wq, i == 0);
- if (io_acct_cancel_pending_work(wqe, acct, match)) {
+ if (io_acct_cancel_pending_work(wq, acct, match)) {
if (match->cancel_all)
goto retry;
break;
@@ -1061,11 +1151,22 @@ retry:
}
}
-static void io_wqe_cancel_running_work(struct io_wqe *wqe,
+static void io_acct_cancel_running_work(struct io_wq_acct *acct,
+ struct io_cb_cancel_data *match)
+{
+ raw_spin_lock(&acct->workers_lock);
+ io_acct_for_each_worker(acct, io_wq_worker_cancel, match);
+ raw_spin_unlock(&acct->workers_lock);
+}
+
+static void io_wq_cancel_running_work(struct io_wq *wq,
struct io_cb_cancel_data *match)
{
rcu_read_lock();
- io_wq_for_each_worker(wqe, io_wq_worker_cancel, match);
+
+ for (int i = 0; i < IO_WQ_ACCT_NR; i++)
+ io_acct_cancel_running_work(&wq->acct[i], match);
+
rcu_read_unlock();
}
@@ -1077,7 +1178,6 @@ enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
.data = data,
.cancel_all = cancel_all,
};
- int node;
/*
* First check pending list, if we're lucky we can just remove it
@@ -1089,22 +1189,16 @@ enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
* as an indication that we attempt to signal cancellation. The
* completion will run normally in this case.
*
- * Do both of these while holding the wqe->lock, to ensure that
+ * Do both of these while holding the acct->workers_lock, to ensure that
* we'll find a work item regardless of state.
*/
- for_each_node(node) {
- struct io_wqe *wqe = wq->wqes[node];
-
- io_wqe_cancel_pending_work(wqe, &match);
- if (match.nr_pending && !match.cancel_all)
- return IO_WQ_CANCEL_OK;
-
- raw_spin_lock(&wqe->lock);
- io_wqe_cancel_running_work(wqe, &match);
- raw_spin_unlock(&wqe->lock);
- if (match.nr_running && !match.cancel_all)
- return IO_WQ_CANCEL_RUNNING;
- }
+ io_wq_cancel_pending_work(wq, &match);
+ if (match.nr_pending && !match.cancel_all)
+ return IO_WQ_CANCEL_OK;
+
+ io_wq_cancel_running_work(wq, &match);
+ if (match.nr_running && !match.cancel_all)
+ return IO_WQ_CANCEL_RUNNING;
if (match.nr_running)
return IO_WQ_CANCEL_RUNNING;
@@ -1113,20 +1207,20 @@ enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
return IO_WQ_CANCEL_NOTFOUND;
}
-static int io_wqe_hash_wake(struct wait_queue_entry *wait, unsigned mode,
+static int io_wq_hash_wake(struct wait_queue_entry *wait, unsigned mode,
int sync, void *key)
{
- struct io_wqe *wqe = container_of(wait, struct io_wqe, wait);
+ struct io_wq *wq = container_of(wait, struct io_wq, wait);
int i;
list_del_init(&wait->entry);
rcu_read_lock();
for (i = 0; i < IO_WQ_ACCT_NR; i++) {
- struct io_wqe_acct *acct = &wqe->acct[i];
+ struct io_wq_acct *acct = &wq->acct[i];
if (test_and_clear_bit(IO_ACCT_STALLED_BIT, &acct->flags))
- io_wqe_activate_free_worker(wqe, acct);
+ io_acct_activate_free_worker(acct);
}
rcu_read_unlock();
return 1;
@@ -1134,74 +1228,55 @@ static int io_wqe_hash_wake(struct wait_queue_entry *wait, unsigned mode,
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
{
- int ret, node, i;
+ int ret, i;
struct io_wq *wq;
- if (WARN_ON_ONCE(!data->free_work || !data->do_work))
- return ERR_PTR(-EINVAL);
if (WARN_ON_ONCE(!bounded))
return ERR_PTR(-EINVAL);
- wq = kzalloc(struct_size(wq, wqes, nr_node_ids), GFP_KERNEL);
+ wq = kzalloc(sizeof(struct io_wq), GFP_KERNEL);
if (!wq)
return ERR_PTR(-ENOMEM);
- ret = cpuhp_state_add_instance_nocalls(io_wq_online, &wq->cpuhp_node);
- if (ret)
- goto err_wq;
refcount_inc(&data->hash->refs);
wq->hash = data->hash;
- wq->free_work = data->free_work;
- wq->do_work = data->do_work;
ret = -ENOMEM;
- for_each_node(node) {
- struct io_wqe *wqe;
- int alloc_node = node;
-
- if (!node_online(alloc_node))
- alloc_node = NUMA_NO_NODE;
- wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, alloc_node);
- if (!wqe)
- goto err;
- wq->wqes[node] = wqe;
- if (!alloc_cpumask_var(&wqe->cpu_mask, GFP_KERNEL))
- goto err;
- cpumask_copy(wqe->cpu_mask, cpumask_of_node(node));
- wqe->node = alloc_node;
- wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
- wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
- task_rlimit(current, RLIMIT_NPROC);
- INIT_LIST_HEAD(&wqe->wait.entry);
- wqe->wait.func = io_wqe_hash_wake;
- for (i = 0; i < IO_WQ_ACCT_NR; i++) {
- struct io_wqe_acct *acct = &wqe->acct[i];
-
- acct->index = i;
- atomic_set(&acct->nr_running, 0);
- INIT_WQ_LIST(&acct->work_list);
- raw_spin_lock_init(&acct->lock);
- }
- wqe->wq = wq;
- raw_spin_lock_init(&wqe->lock);
- INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
- INIT_LIST_HEAD(&wqe->all_list);
+
+ if (!alloc_cpumask_var(&wq->cpu_mask, GFP_KERNEL))
+ goto err;
+ cpuset_cpus_allowed(data->task, wq->cpu_mask);
+ wq->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
+ wq->acct[IO_WQ_ACCT_UNBOUND].max_workers =
+ task_rlimit(current, RLIMIT_NPROC);
+ INIT_LIST_HEAD(&wq->wait.entry);
+ wq->wait.func = io_wq_hash_wake;
+ for (i = 0; i < IO_WQ_ACCT_NR; i++) {
+ struct io_wq_acct *acct = &wq->acct[i];
+
+ atomic_set(&acct->nr_running, 0);
+
+ raw_spin_lock_init(&acct->workers_lock);
+ INIT_HLIST_NULLS_HEAD(&acct->free_list, 0);
+ INIT_LIST_HEAD(&acct->all_list);
+
+ INIT_WQ_LIST(&acct->work_list);
+ raw_spin_lock_init(&acct->lock);
}
wq->task = get_task_struct(data->task);
atomic_set(&wq->worker_refs, 1);
init_completion(&wq->worker_done);
+ ret = cpuhp_state_add_instance_nocalls(io_wq_online, &wq->cpuhp_node);
+ if (ret) {
+ put_task_struct(wq->task);
+ goto err;
+ }
+
return wq;
err:
io_wq_put_hash(data->hash);
- cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
- for_each_node(node) {
- if (!wq->wqes[node])
- continue;
- free_cpumask_var(wq->wqes[node]->cpu_mask);
- kfree(wq->wqes[node]);
- }
-err_wq:
+ free_cpumask_var(wq->cpu_mask);
kfree(wq);
return ERR_PTR(ret);
}
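The rewritten io_wq_create() above folds the old per-node io_wqe array into a single io_wq with embedded accounting, and defers the cpuhp registration to the very end so the error label only has to drop the hash reference, free the cpumask and free the allocation itself. A minimal userspace sketch of that "set up locally first, register externally last, unwind in reverse" shape (wq_create() and its fields are illustrative, not the kernel API):

#include <stdlib.h>

enum { ACCT_BOUND, ACCT_UNBOUND, ACCT_NR };

struct acct { int max_workers; };
struct wq   { struct acct acct[ACCT_NR]; unsigned long *cpu_mask; };

/* Allocate and initialise everything private to the object first; the
 * error path only has to undo what was actually set up, and the
 * "register with the outside world" step (cpuhp in the kernel) goes
 * last so it never needs unwinding here. */
struct wq *wq_create(int bounded, int unbounded)
{
	struct wq *wq = calloc(1, sizeof(*wq));

	if (!wq)
		return NULL;
	wq->cpu_mask = calloc(1, sizeof(*wq->cpu_mask));
	if (!wq->cpu_mask)
		goto err;
	wq->acct[ACCT_BOUND].max_workers = bounded;
	wq->acct[ACCT_UNBOUND].max_workers = unbounded;
	/* in the kernel, cpuhp registration happens here, last */
	return wq;
err:
	free(wq->cpu_mask);	/* free(NULL) is a no-op */
	free(wq);
	return NULL;
}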
@@ -1213,7 +1288,7 @@ static bool io_task_work_match(struct callback_head *cb, void *data)
if (cb->func != create_worker_cb && cb->func != create_worker_cont)
return false;
worker = container_of(cb, struct io_worker, create_work);
- return worker->wqe->wq == data;
+ return worker->wq == data;
}
void io_wq_exit_start(struct io_wq *wq)
@@ -1241,48 +1316,35 @@ static void io_wq_cancel_tw_create(struct io_wq *wq)
static void io_wq_exit_workers(struct io_wq *wq)
{
- int node;
-
if (!wq->task)
return;
io_wq_cancel_tw_create(wq);
rcu_read_lock();
- for_each_node(node) {
- struct io_wqe *wqe = wq->wqes[node];
-
- io_wq_for_each_worker(wqe, io_wq_worker_wake, NULL);
- }
+ io_wq_for_each_worker(wq, io_wq_worker_wake, NULL);
rcu_read_unlock();
io_worker_ref_put(wq);
wait_for_completion(&wq->worker_done);
- for_each_node(node) {
- spin_lock_irq(&wq->hash->wait.lock);
- list_del_init(&wq->wqes[node]->wait.entry);
- spin_unlock_irq(&wq->hash->wait.lock);
- }
+ spin_lock_irq(&wq->hash->wait.lock);
+ list_del_init(&wq->wait.entry);
+ spin_unlock_irq(&wq->hash->wait.lock);
+
put_task_struct(wq->task);
wq->task = NULL;
}
static void io_wq_destroy(struct io_wq *wq)
{
- int node;
+ struct io_cb_cancel_data match = {
+ .fn = io_wq_work_match_all,
+ .cancel_all = true,
+ };
cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
-
- for_each_node(node) {
- struct io_wqe *wqe = wq->wqes[node];
- struct io_cb_cancel_data match = {
- .fn = io_wq_work_match_all,
- .cancel_all = true,
- };
- io_wqe_cancel_pending_work(wqe, &match);
- free_cpumask_var(wqe->cpu_mask);
- kfree(wqe);
- }
+ io_wq_cancel_pending_work(wq, &match);
+ free_cpumask_var(wq->cpu_mask);
io_wq_put_hash(wq->hash);
kfree(wq);
}
@@ -1305,9 +1367,9 @@ static bool io_wq_worker_affinity(struct io_worker *worker, void *data)
struct online_data *od = data;
if (od->online)
- cpumask_set_cpu(od->cpu, worker->wqe->cpu_mask);
+ cpumask_set_cpu(od->cpu, worker->wq->cpu_mask);
else
- cpumask_clear_cpu(od->cpu, worker->wqe->cpu_mask);
+ cpumask_clear_cpu(od->cpu, worker->wq->cpu_mask);
return false;
}
@@ -1317,11 +1379,9 @@ static int __io_wq_cpu_online(struct io_wq *wq, unsigned int cpu, bool online)
.cpu = cpu,
.online = online
};
- int i;
rcu_read_lock();
- for_each_node(i)
- io_wq_for_each_worker(wq->wqes[i], io_wq_worker_affinity, &od);
+ io_wq_for_each_worker(wq, io_wq_worker_affinity, &od);
rcu_read_unlock();
return 0;
}
@@ -1340,21 +1400,31 @@ static int io_wq_cpu_offline(unsigned int cpu, struct hlist_node *node)
return __io_wq_cpu_online(wq, cpu, false);
}
-int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask)
+int io_wq_cpu_affinity(struct io_uring_task *tctx, cpumask_var_t mask)
{
- int i;
+ cpumask_var_t allowed_mask;
+ int ret = 0;
- rcu_read_lock();
- for_each_node(i) {
- struct io_wqe *wqe = wq->wqes[i];
+ if (!tctx || !tctx->io_wq)
+ return -EINVAL;
+
+ if (!alloc_cpumask_var(&allowed_mask, GFP_KERNEL))
+ return -ENOMEM;
- if (mask)
- cpumask_copy(wqe->cpu_mask, mask);
+ rcu_read_lock();
+ cpuset_cpus_allowed(tctx->io_wq->task, allowed_mask);
+ if (mask) {
+ if (cpumask_subset(mask, allowed_mask))
+ cpumask_copy(tctx->io_wq->cpu_mask, mask);
else
- cpumask_copy(wqe->cpu_mask, cpumask_of_node(i));
+ ret = -EINVAL;
+ } else {
+ cpumask_copy(tctx->io_wq->cpu_mask, allowed_mask);
}
rcu_read_unlock();
- return 0;
+
+ free_cpumask_var(allowed_mask);
+ return ret;
}
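io_wq_cpu_affinity() now operates on the tctx's single io_wq and, new in this version, validates any requested mask against the CPUs the task is actually allowed to use per its cpuset. A hedged sketch of just that subset check, with cpumasks reduced to 64-bit words purely for illustration:

#include <stdint.h>
#include <errno.h>

/* Illustrative only: CPU masks shown as plain 64-bit words, not cpumask_t. */
int set_affinity(uint64_t *wq_mask, uint64_t allowed, const uint64_t *requested)
{
	if (requested) {
		/* reject a request that escapes the allowed (cpuset) mask */
		if (*requested & ~allowed)
			return -EINVAL;
		*wq_mask = *requested;
	} else {
		/* no mask given: fall back to everything the task may use */
		*wq_mask = allowed;
	}
	return 0;
}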
/*
@@ -1363,9 +1433,9 @@ int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask)
*/
int io_wq_max_workers(struct io_wq *wq, int *new_count)
{
+ struct io_wq_acct *acct;
int prev[IO_WQ_ACCT_NR];
- bool first_node = true;
- int i, node;
+ int i;
BUILD_BUG_ON((int) IO_WQ_ACCT_BOUND != (int) IO_WQ_BOUND);
BUILD_BUG_ON((int) IO_WQ_ACCT_UNBOUND != (int) IO_WQ_UNBOUND);
@@ -1380,20 +1450,14 @@ int io_wq_max_workers(struct io_wq *wq, int *new_count)
prev[i] = 0;
rcu_read_lock();
- for_each_node(node) {
- struct io_wqe *wqe = wq->wqes[node];
- struct io_wqe_acct *acct;
-
- raw_spin_lock(&wqe->lock);
- for (i = 0; i < IO_WQ_ACCT_NR; i++) {
- acct = &wqe->acct[i];
- if (first_node)
- prev[i] = max_t(int, acct->max_workers, prev[i]);
- if (new_count[i])
- acct->max_workers = new_count[i];
- }
- raw_spin_unlock(&wqe->lock);
- first_node = false;
+
+ for (i = 0; i < IO_WQ_ACCT_NR; i++) {
+ acct = &wq->acct[i];
+ raw_spin_lock(&acct->workers_lock);
+ prev[i] = max_t(int, acct->max_workers, prev[i]);
+ if (new_count[i])
+ acct->max_workers = new_count[i];
+ raw_spin_unlock(&acct->workers_lock);
}
rcu_read_unlock();
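With the per-node wqes gone, io_wq_max_workers() touches each accounting class exactly once, under that class's own workers_lock. A small sketch of the same read-old/set-new pattern, with pthread mutexes standing in for the raw spinlocks:

#include <pthread.h>

enum { BOUND, UNBOUND, NR };

struct acct_limits { pthread_mutex_t lock; int max_workers; };

/* For each accounting class, report the previous limit and, if the caller
 * passed a non-zero value, install the new one - all under that class's
 * own lock now that there is no per-node array to walk. */
void set_max_workers(struct acct_limits acct[NR], int new_count[NR], int prev[NR])
{
	for (int i = 0; i < NR; i++) {
		pthread_mutex_lock(&acct[i].lock);
		prev[i] = acct[i].max_workers;
		if (new_count[i])
			acct[i].max_workers = new_count[i];
		pthread_mutex_unlock(&acct[i].lock);
	}
}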
diff --git a/io_uring/io-wq.h b/io_uring/io-wq.h
index 31228426d192..774abab54732 100644
--- a/io_uring/io-wq.h
+++ b/io_uring/io-wq.h
@@ -21,9 +21,6 @@ enum io_wq_cancel {
IO_WQ_CANCEL_NOTFOUND, /* work not found */
};
-typedef struct io_wq_work *(free_work_fn)(struct io_wq_work *);
-typedef void (io_wq_work_fn)(struct io_wq_work *);
-
struct io_wq_hash {
refcount_t refs;
unsigned long map;
@@ -39,8 +36,6 @@ static inline void io_wq_put_hash(struct io_wq_hash *hash)
struct io_wq_data {
struct io_wq_hash *hash;
struct task_struct *task;
- io_wq_work_fn *do_work;
- free_work_fn *free_work;
};
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data);
@@ -50,12 +45,18 @@ void io_wq_put_and_exit(struct io_wq *wq);
void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
void io_wq_hash_work(struct io_wq_work *work, void *val);
-int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask);
+int io_wq_cpu_affinity(struct io_uring_task *tctx, cpumask_var_t mask);
int io_wq_max_workers(struct io_wq *wq, int *new_count);
+bool io_wq_worker_stopped(void);
+
+static inline bool __io_wq_is_hashed(unsigned int work_flags)
+{
+ return work_flags & IO_WQ_WORK_HASHED;
+}
static inline bool io_wq_is_hashed(struct io_wq_work *work)
{
- return work->flags & IO_WQ_WORK_HASHED;
+ return __io_wq_is_hashed(atomic_read(&work->flags));
}
typedef bool (work_cancel_fn)(struct io_wq_work *, void *);
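The header now keeps work->flags in an atomic_t, so the hashed check is split: __io_wq_is_hashed() tests an already-loaded snapshot of the flags, and io_wq_is_hashed() performs the single atomic read. The same split in portable C11 atomics, purely as an illustration:

#include <stdatomic.h>
#include <stdbool.h>

#define WORK_HASHED (1u << 0)	/* stand-in for IO_WQ_WORK_HASHED */

struct work { atomic_uint flags; };

/* Test a bit in a snapshot that the caller has already loaded ... */
static inline bool flags_hashed(unsigned int flags)
{
	return flags & WORK_HASHED;
}

/* ... or load the atomic once and reuse the snapshot helper. */
static inline bool work_hashed(struct work *w)
{
	return flags_hashed(atomic_load(&w->flags));
}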
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index db623b3185c8..5d130c578435 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -29,7 +29,7 @@
*
* Also see the examples in the liburing library:
*
- * git://git.kernel.dk/liburing
+ * git://git.kernel.org/pub/scm/linux/kernel/git/axboe/liburing.git
*
* io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
* from data shared between the kernel and application. This is done both
@@ -51,7 +51,6 @@
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
-#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/percpu.h>
@@ -59,19 +58,19 @@
#include <linux/bvec.h>
#include <linux/net.h>
#include <net/sock.h>
-#include <net/af_unix.h>
-#include <net/scm.h>
#include <linux/anon_inodes.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
-#include <linux/highmem.h>
#include <linux/fsnotify.h>
#include <linux/fadvise.h>
#include <linux/task_work.h>
#include <linux/io_uring.h>
+#include <linux/io_uring/cmd.h>
#include <linux/audit.h>
#include <linux/security.h>
+#include <linux/jump_label.h>
+#include <asm/shmparam.h>
#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
@@ -80,10 +79,12 @@
#include "io-wq.h"
+#include "filetable.h"
#include "io_uring.h"
#include "opdef.h"
#include "refs.h"
#include "tctx.h"
+#include "register.h"
#include "sqpoll.h"
#include "fdinfo.h"
#include "kbuf.h"
@@ -91,86 +92,99 @@
#include "cancel.h"
#include "net.h"
#include "notif.h"
+#include "waitid.h"
+#include "futex.h"
+#include "napi.h"
+#include "uring_cmd.h"
+#include "msg_ring.h"
+#include "memmap.h"
+#include "zcrx.h"
#include "timeout.h"
#include "poll.h"
+#include "rw.h"
#include "alloc_cache.h"
-
-#define IORING_MAX_ENTRIES 32768
-#define IORING_MAX_CQ_ENTRIES (2 * IORING_MAX_ENTRIES)
-
-#define IORING_MAX_RESTRICTIONS (IORING_RESTRICTION_LAST + \
- IORING_REGISTER_LAST + IORING_OP_LAST)
+#include "eventfd.h"
#define SQE_COMMON_FLAGS (IOSQE_FIXED_FILE | IOSQE_IO_LINK | \
IOSQE_IO_HARDLINK | IOSQE_ASYNC)
-#define SQE_VALID_FLAGS (SQE_COMMON_FLAGS | IOSQE_BUFFER_SELECT | \
- IOSQE_IO_DRAIN | IOSQE_CQE_SKIP_SUCCESS)
+#define IO_REQ_LINK_FLAGS (REQ_F_LINK | REQ_F_HARDLINK)
#define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \
- REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS | \
- REQ_F_ASYNC_DATA)
+ REQ_F_INFLIGHT | REQ_F_CREDS | REQ_F_ASYNC_DATA)
-#define IO_REQ_CLEAN_SLOW_FLAGS (REQ_F_REFCOUNT | REQ_F_LINK | REQ_F_HARDLINK |\
+#define IO_REQ_CLEAN_SLOW_FLAGS (REQ_F_REFCOUNT | IO_REQ_LINK_FLAGS | \
+ REQ_F_REISSUE | REQ_F_POLLED | \
IO_REQ_CLEAN_FLAGS)
#define IO_TCTX_REFS_CACHE_NR (1U << 10)
#define IO_COMPL_BATCH 32
#define IO_REQ_ALLOC_BATCH 8
-
-enum {
- IO_CHECK_CQ_OVERFLOW_BIT,
- IO_CHECK_CQ_DROPPED_BIT,
-};
-
-enum {
- IO_EVENTFD_OP_SIGNAL_BIT,
- IO_EVENTFD_OP_FREE_BIT,
-};
-
-struct io_defer_entry {
- struct list_head list;
- struct io_kiocb *req;
- u32 seq;
-};
+#define IO_LOCAL_TW_DEFAULT_MAX 20
/* requests with any of those set should undergo io_disarm_next() */
#define IO_DISARM_MASK (REQ_F_ARM_LTIMEOUT | REQ_F_LINK_TIMEOUT | REQ_F_FAIL)
-#define IO_REQ_LINK_FLAGS (REQ_F_LINK | REQ_F_HARDLINK)
-
-static bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
- struct task_struct *task,
- bool cancel_all);
-
-static void io_dismantle_req(struct io_kiocb *req);
-static void io_clean_op(struct io_kiocb *req);
-static void io_queue_sqe(struct io_kiocb *req);
-static void io_move_task_work_from_local(struct io_ring_ctx *ctx);
-static void __io_submit_flush_completions(struct io_ring_ctx *ctx);
-static __cold void io_fallback_tw(struct io_uring_task *tctx);
-static struct kmem_cache *req_cachep;
+/*
+ * No waiters. It's larger than any valid value of the tw counter
+ * so that tests against ->cq_wait_nr would fail and skip wake_up().
+ */
+#define IO_CQ_WAKE_INIT (-1U)
+/* Forced wake up if there is a waiter regardless of ->cq_wait_nr */
+#define IO_CQ_WAKE_FORCE (IO_CQ_WAKE_INIT >> 1)
+
+static void io_queue_sqe(struct io_kiocb *req, unsigned int extra_flags);
+static void __io_req_caches_free(struct io_ring_ctx *ctx);
+
+static __read_mostly DEFINE_STATIC_KEY_FALSE(io_key_has_sqarray);
+
+struct kmem_cache *req_cachep;
+static struct workqueue_struct *iou_wq __ro_after_init;
+
+static int __read_mostly sysctl_io_uring_disabled;
+static int __read_mostly sysctl_io_uring_group = -1;
+
+#ifdef CONFIG_SYSCTL
+static const struct ctl_table kernel_io_uring_disabled_table[] = {
+ {
+ .procname = "io_uring_disabled",
+ .data = &sysctl_io_uring_disabled,
+ .maxlen = sizeof(sysctl_io_uring_disabled),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_TWO,
+ },
+ {
+ .procname = "io_uring_group",
+ .data = &sysctl_io_uring_group,
+ .maxlen = sizeof(gid_t),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+};
+#endif
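These two sysctls gate ring creation: io_uring_disabled takes 0, 1 or 2, and io_uring_group names a group that may still create rings in the restricted mode. The helper that consumes them is not part of this hunk, so the following is only a hedged sketch of the policy the values are commonly described as expressing:

#include <stdbool.h>

/* Hypothetical sketch; the real check lives in a helper outside this hunk. */
bool io_uring_allowed_sketch(int disabled, bool in_allowed_group, bool is_admin)
{
	if (disabled == 0)
		return true;	/* 0: io_uring creation enabled for everyone */
	if (disabled == 2)
		return false;	/* 2: creation disabled system-wide */
	/* 1: restricted - assumed to require the configured group or privilege */
	return in_allowed_group || is_admin;
}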
-struct sock *io_uring_get_socket(struct file *file)
+static void io_poison_cached_req(struct io_kiocb *req)
{
-#if defined(CONFIG_UNIX)
- if (io_is_uring_fops(file)) {
- struct io_ring_ctx *ctx = file->private_data;
-
- return ctx->ring_sock->sk;
- }
-#endif
- return NULL;
+ req->ctx = IO_URING_PTR_POISON;
+ req->tctx = IO_URING_PTR_POISON;
+ req->file = IO_URING_PTR_POISON;
+ req->creds = IO_URING_PTR_POISON;
+ req->io_task_work.func = IO_URING_PTR_POISON;
+ req->apoll = IO_URING_PTR_POISON;
}
-EXPORT_SYMBOL(io_uring_get_socket);
-static inline void io_submit_flush_completions(struct io_ring_ctx *ctx)
+static void io_poison_req(struct io_kiocb *req)
{
- if (!wq_list_empty(&ctx->submit_state.compl_reqs) ||
- ctx->submit_state.cqes_count)
- __io_submit_flush_completions(ctx);
+ io_poison_cached_req(req);
+ req->async_data = IO_URING_PTR_POISON;
+ req->kbuf = IO_URING_PTR_POISON;
+ req->comp_list.next = IO_URING_PTR_POISON;
+ req->file_node = IO_URING_PTR_POISON;
+ req->link = IO_URING_PTR_POISON;
}
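io_poison_cached_req()/io_poison_req() overwrite every pointer field with IO_URING_PTR_POISON before a request is parked or recycled, so a stale access faults loudly instead of quietly reading plausible leftovers. The idea in a tiny self-contained form (PTR_POISON here is just an illustrative constant):

#include <stdint.h>

/* Illustrative poison value; the kernel uses IO_URING_PTR_POISON. */
#define PTR_POISON ((void *)(uintptr_t)0xdead4ead)

struct cached_req { void *ctx, *file, *link; };

/* Stomp on every pointer before the object goes back into a cache, so a
 * use-after-recycle dereferences an obviously bad address. */
void poison_req(struct cached_req *req)
{
	req->ctx  = PTR_POISON;
	req->file = PTR_POISON;
	req->link = PTR_POISON;
}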
static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
@@ -183,44 +197,6 @@ static inline unsigned int __io_cqring_events_user(struct io_ring_ctx *ctx)
return READ_ONCE(ctx->rings->cq.tail) - READ_ONCE(ctx->rings->cq.head);
}
-static bool io_match_linked(struct io_kiocb *head)
-{
- struct io_kiocb *req;
-
- io_for_each_link(req, head) {
- if (req->flags & REQ_F_INFLIGHT)
- return true;
- }
- return false;
-}
-
-/*
- * As io_match_task() but protected against racing with linked timeouts.
- * User must not hold timeout_lock.
- */
-bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
- bool cancel_all)
-{
- bool matched;
-
- if (task && head->task != task)
- return false;
- if (cancel_all)
- return true;
-
- if (head->flags & REQ_F_LINK_TIMEOUT) {
- struct io_ring_ctx *ctx = head->ctx;
-
- /* protect against races with linked timeouts */
- spin_lock_irq(&ctx->timeout_lock);
- matched = io_match_linked(head);
- spin_unlock_irq(&ctx->timeout_lock);
- } else {
- matched = io_match_linked(head);
- }
- return matched;
-}
-
static inline void req_fail_link_node(struct io_kiocb *req, int res)
{
req_set_fail(req);
@@ -229,6 +205,8 @@ static inline void req_fail_link_node(struct io_kiocb *req, int res)
static inline void io_req_add_to_cache(struct io_kiocb *req, struct io_ring_ctx *ctx)
{
+ if (IS_ENABLED(CONFIG_KASAN))
+ io_poison_cached_req(req);
wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list);
}
@@ -239,43 +217,75 @@ static __cold void io_ring_ctx_ref_free(struct percpu_ref *ref)
complete(&ctx->ref_comp);
}
+/*
+ * Terminate the request if either of these conditions are true:
+ *
+ * 1) It's being executed by the original task, but that task is marked
+ * with PF_EXITING as it's exiting.
+ * 2) PF_KTHREAD is set, in which case the invoker of the task_work is
+ * our fallback task_work.
+ * 3) The ring has been closed and is going away.
+ */
+static inline bool io_should_terminate_tw(struct io_ring_ctx *ctx)
+{
+ return (current->flags & (PF_EXITING | PF_KTHREAD)) || percpu_ref_is_dying(&ctx->refs);
+}
+
static __cold void io_fallback_req_func(struct work_struct *work)
{
struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
fallback_work.work);
struct llist_node *node = llist_del_all(&ctx->fallback_llist);
struct io_kiocb *req, *tmp;
- bool locked = false;
+ struct io_tw_state ts = {};
percpu_ref_get(&ctx->refs);
+ mutex_lock(&ctx->uring_lock);
+ ts.cancel = io_should_terminate_tw(ctx);
llist_for_each_entry_safe(req, tmp, node, io_task_work.node)
- req->io_task_work.func(req, &locked);
-
- if (locked) {
- io_submit_flush_completions(ctx);
- mutex_unlock(&ctx->uring_lock);
- }
+ req->io_task_work.func((struct io_tw_req){req}, ts);
+ io_submit_flush_completions(ctx);
+ mutex_unlock(&ctx->uring_lock);
percpu_ref_put(&ctx->refs);
}
static int io_alloc_hash_table(struct io_hash_table *table, unsigned bits)
{
- unsigned hash_buckets = 1U << bits;
- size_t hash_size = hash_buckets * sizeof(table->hbs[0]);
+ unsigned int hash_buckets;
+ int i;
- table->hbs = kmalloc(hash_size, GFP_KERNEL);
- if (!table->hbs)
- return -ENOMEM;
+ do {
+ hash_buckets = 1U << bits;
+ table->hbs = kvmalloc_array(hash_buckets, sizeof(table->hbs[0]),
+ GFP_KERNEL_ACCOUNT);
+ if (table->hbs)
+ break;
+ if (bits == 1)
+ return -ENOMEM;
+ bits--;
+ } while (1);
table->hash_bits = bits;
- init_hash_table(table, hash_buckets);
+ for (i = 0; i < hash_buckets; i++)
+ INIT_HLIST_HEAD(&table->hbs[i].list);
return 0;
}
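io_alloc_hash_table() now retries with progressively fewer bits instead of failing outright when the first kvmalloc_array() attempt does not succeed. The same back-off loop in plain C:

#include <stdlib.h>
#include <errno.h>

struct bucket { void *head; };

/* Try the requested table size first, then halve it until an allocation
 * succeeds or we are down to two buckets (bits == 1). */
int alloc_hash_table(struct bucket **tbl, unsigned int *out_bits, unsigned int bits)
{
	do {
		unsigned int nr = 1u << bits;

		*tbl = calloc(nr, sizeof(**tbl));
		if (*tbl) {
			*out_bits = bits;
			return 0;
		}
		if (bits == 1)
			return -ENOMEM;
		bits--;
	} while (1);
}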
+static void io_free_alloc_caches(struct io_ring_ctx *ctx)
+{
+ io_alloc_cache_free(&ctx->apoll_cache, kfree);
+ io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free);
+ io_alloc_cache_free(&ctx->rw_cache, io_rw_cache_free);
+ io_alloc_cache_free(&ctx->cmd_cache, io_cmd_cache_free);
+ io_futex_cache_free(ctx);
+ io_rsrc_cache_free(ctx);
+}
+
static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
{
struct io_ring_ctx *ctx;
int hash_bits;
+ bool ret;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
@@ -292,84 +302,100 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
hash_bits = clamp(hash_bits, 1, 8);
if (io_alloc_hash_table(&ctx->cancel_table, hash_bits))
goto err;
- if (io_alloc_hash_table(&ctx->cancel_table_locked, hash_bits))
- goto err;
-
- ctx->dummy_ubuf = kzalloc(sizeof(*ctx->dummy_ubuf), GFP_KERNEL);
- if (!ctx->dummy_ubuf)
- goto err;
- /* set invalid range, so io_import_fixed() fails meeting it */
- ctx->dummy_ubuf->ubuf = -1UL;
-
if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
0, GFP_KERNEL))
goto err;
ctx->flags = p->flags;
+ ctx->hybrid_poll_time = LLONG_MAX;
+ atomic_set(&ctx->cq_wait_nr, IO_CQ_WAKE_INIT);
init_waitqueue_head(&ctx->sqo_sq_wait);
INIT_LIST_HEAD(&ctx->sqd_list);
INIT_LIST_HEAD(&ctx->cq_overflow_list);
- INIT_LIST_HEAD(&ctx->io_buffers_cache);
- io_alloc_cache_init(&ctx->apoll_cache);
- io_alloc_cache_init(&ctx->netmsg_cache);
+ ret = io_alloc_cache_init(&ctx->apoll_cache, IO_POLL_ALLOC_CACHE_MAX,
+ sizeof(struct async_poll), 0);
+ ret |= io_alloc_cache_init(&ctx->netmsg_cache, IO_ALLOC_CACHE_MAX,
+ sizeof(struct io_async_msghdr),
+ offsetof(struct io_async_msghdr, clear));
+ ret |= io_alloc_cache_init(&ctx->rw_cache, IO_ALLOC_CACHE_MAX,
+ sizeof(struct io_async_rw),
+ offsetof(struct io_async_rw, clear));
+ ret |= io_alloc_cache_init(&ctx->cmd_cache, IO_ALLOC_CACHE_MAX,
+ sizeof(struct io_async_cmd),
+ sizeof(struct io_async_cmd));
+ ret |= io_futex_cache_init(ctx);
+ ret |= io_rsrc_cache_init(ctx);
+ if (ret)
+ goto free_ref;
init_completion(&ctx->ref_comp);
xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
mutex_init(&ctx->uring_lock);
init_waitqueue_head(&ctx->cq_wait);
+ init_waitqueue_head(&ctx->poll_wq);
spin_lock_init(&ctx->completion_lock);
- spin_lock_init(&ctx->timeout_lock);
+ raw_spin_lock_init(&ctx->timeout_lock);
INIT_WQ_LIST(&ctx->iopoll_list);
- INIT_LIST_HEAD(&ctx->io_buffers_pages);
- INIT_LIST_HEAD(&ctx->io_buffers_comp);
INIT_LIST_HEAD(&ctx->defer_list);
INIT_LIST_HEAD(&ctx->timeout_list);
INIT_LIST_HEAD(&ctx->ltimeout_list);
- spin_lock_init(&ctx->rsrc_ref_lock);
- INIT_LIST_HEAD(&ctx->rsrc_ref_list);
- INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work);
- init_task_work(&ctx->rsrc_put_tw, io_rsrc_put_tw);
- init_llist_head(&ctx->rsrc_put_llist);
init_llist_head(&ctx->work_llist);
INIT_LIST_HEAD(&ctx->tctx_list);
ctx->submit_state.free_list.next = NULL;
- INIT_WQ_LIST(&ctx->locked_free_list);
+ INIT_HLIST_HEAD(&ctx->waitid_list);
+ xa_init_flags(&ctx->zcrx_ctxs, XA_FLAGS_ALLOC);
+#ifdef CONFIG_FUTEX
+ INIT_HLIST_HEAD(&ctx->futex_list);
+#endif
INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func);
INIT_WQ_LIST(&ctx->submit_state.compl_reqs);
+ INIT_HLIST_HEAD(&ctx->cancelable_uring_cmd);
+ io_napi_init(ctx);
+ mutex_init(&ctx->mmap_lock);
+
return ctx;
+
+free_ref:
+ percpu_ref_exit(&ctx->refs);
err:
- kfree(ctx->dummy_ubuf);
- kfree(ctx->cancel_table.hbs);
- kfree(ctx->cancel_table_locked.hbs);
- kfree(ctx->io_bl);
+ io_free_alloc_caches(ctx);
+ kvfree(ctx->cancel_table.hbs);
xa_destroy(&ctx->io_bl_xa);
kfree(ctx);
return NULL;
}
-static void io_account_cq_overflow(struct io_ring_ctx *ctx)
+static void io_clean_op(struct io_kiocb *req)
{
- struct io_rings *r = ctx->rings;
-
- WRITE_ONCE(r->cq_overflow, READ_ONCE(r->cq_overflow) + 1);
- ctx->cq_extra--;
-}
+ if (unlikely(req->flags & REQ_F_BUFFER_SELECTED))
+ io_kbuf_drop_legacy(req);
-static bool req_need_defer(struct io_kiocb *req, u32 seq)
-{
- if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
- struct io_ring_ctx *ctx = req->ctx;
+ if (req->flags & REQ_F_NEED_CLEANUP) {
+ const struct io_cold_def *def = &io_cold_defs[req->opcode];
- return seq + READ_ONCE(ctx->cq_extra) != ctx->cached_cq_tail;
+ if (def->cleanup)
+ def->cleanup(req);
}
-
- return false;
+ if (req->flags & REQ_F_INFLIGHT)
+ atomic_dec(&req->tctx->inflight_tracked);
+ if (req->flags & REQ_F_CREDS)
+ put_cred(req->creds);
+ if (req->flags & REQ_F_ASYNC_DATA) {
+ kfree(req->async_data);
+ req->async_data = NULL;
+ }
+ req->flags &= ~IO_REQ_CLEAN_FLAGS;
}
-static inline void io_req_track_inflight(struct io_kiocb *req)
+/*
+ * Mark the request as inflight, so that file cancelation will find it.
+ * Can be used if the file is an io_uring instance, or if the request itself
+ * relies on ->mm being alive for the duration of the request.
+ */
+inline void io_req_track_inflight(struct io_kiocb *req)
{
if (!(req->flags & REQ_F_INFLIGHT)) {
req->flags |= REQ_F_INFLIGHT;
- atomic_inc(&req->task->io_uring->inflight_tracked);
+ atomic_inc(&req->tctx->inflight_tracked);
}
}
@@ -387,27 +413,9 @@ static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
return req->link;
}
-static inline struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
-{
- if (likely(!(req->flags & REQ_F_ARM_LTIMEOUT)))
- return NULL;
- return __io_prep_linked_timeout(req);
-}
-
-static noinline void __io_arm_ltimeout(struct io_kiocb *req)
-{
- io_queue_linked_timeout(__io_prep_linked_timeout(req));
-}
-
-static inline void io_arm_ltimeout(struct io_kiocb *req)
-{
- if (unlikely(req->flags & REQ_F_ARM_LTIMEOUT))
- __io_arm_ltimeout(req);
-}
-
static void io_prep_async_work(struct io_kiocb *req)
{
- const struct io_op_def *def = &io_op_defs[req->opcode];
+ const struct io_issue_def *def = &io_issue_defs[req->opcode];
struct io_ring_ctx *ctx = req->ctx;
if (!(req->flags & REQ_F_CREDS)) {
@@ -416,20 +424,25 @@ static void io_prep_async_work(struct io_kiocb *req)
}
req->work.list.next = NULL;
- req->work.flags = 0;
- req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
+ atomic_set(&req->work.flags, 0);
if (req->flags & REQ_F_FORCE_ASYNC)
- req->work.flags |= IO_WQ_WORK_CONCURRENT;
+ atomic_or(IO_WQ_WORK_CONCURRENT, &req->work.flags);
+
+ if (req->file && !(req->flags & REQ_F_FIXED_FILE))
+ req->flags |= io_file_get_flags(req->file);
- if (req->file && !io_req_ffs_set(req))
- req->flags |= io_file_get_flags(req->file) << REQ_F_SUPPORT_NOWAIT_BIT;
+ if (req->file && (req->flags & REQ_F_ISREG)) {
+ bool should_hash = def->hash_reg_file;
- if (req->flags & REQ_F_ISREG) {
- if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
+ /* don't serialize this request if the fs doesn't need it */
+ if (should_hash && (req->file->f_flags & O_DIRECT) &&
+ (req->file->f_op->fop_flags & FOP_DIO_PARALLEL_WRITE))
+ should_hash = false;
+ if (should_hash || (ctx->flags & IORING_SETUP_IOPOLL))
io_wq_hash_work(&req->work, file_inode(req->file));
} else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
if (def->unbound_nonreg_file)
- req->work.flags |= IO_WQ_WORK_UNBOUND;
+ atomic_or(IO_WQ_WORK_UNBOUND, &req->work.flags);
}
}
@@ -440,23 +453,26 @@ static void io_prep_async_link(struct io_kiocb *req)
if (req->flags & REQ_F_LINK_TIMEOUT) {
struct io_ring_ctx *ctx = req->ctx;
- spin_lock_irq(&ctx->timeout_lock);
+ raw_spin_lock_irq(&ctx->timeout_lock);
io_for_each_link(cur, req)
io_prep_async_work(cur);
- spin_unlock_irq(&ctx->timeout_lock);
+ raw_spin_unlock_irq(&ctx->timeout_lock);
} else {
io_for_each_link(cur, req)
io_prep_async_work(cur);
}
}
-void io_queue_iowq(struct io_kiocb *req, bool *dont_use)
+static void io_queue_iowq(struct io_kiocb *req)
{
- struct io_kiocb *link = io_prep_linked_timeout(req);
- struct io_uring_task *tctx = req->task->io_uring;
+ struct io_uring_task *tctx = req->tctx;
BUG_ON(!tctx);
- BUG_ON(!tctx->io_wq);
+
+ if ((current->flags & PF_KTHREAD) || !tctx->io_wq) {
+ io_req_task_queue_fail(req, -ECANCELED);
+ return;
+ }
/* init ->work of the whole link before punting */
io_prep_async_link(req);
@@ -468,206 +484,149 @@ void io_queue_iowq(struct io_kiocb *req, bool *dont_use)
* procedure rather than attempt to run this request (or create a new
* worker for it).
*/
- if (WARN_ON_ONCE(!same_thread_group(req->task, current)))
- req->work.flags |= IO_WQ_WORK_CANCEL;
+ if (WARN_ON_ONCE(!same_thread_group(tctx->task, current)))
+ atomic_or(IO_WQ_WORK_CANCEL, &req->work.flags);
trace_io_uring_queue_async_work(req, io_wq_is_hashed(&req->work));
io_wq_enqueue(tctx->io_wq, &req->work);
- if (link)
- io_queue_linked_timeout(link);
}
-static __cold void io_queue_deferred(struct io_ring_ctx *ctx)
+static void io_req_queue_iowq_tw(struct io_tw_req tw_req, io_tw_token_t tw)
{
- while (!list_empty(&ctx->defer_list)) {
- struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
- struct io_defer_entry, list);
-
- if (req_need_defer(de->req, de->seq))
- break;
- list_del_init(&de->list);
- io_req_task_queue(de->req);
- kfree(de);
- }
+ io_queue_iowq(tw_req.req);
}
-
-static void io_eventfd_ops(struct rcu_head *rcu)
+void io_req_queue_iowq(struct io_kiocb *req)
{
- struct io_ev_fd *ev_fd = container_of(rcu, struct io_ev_fd, rcu);
- int ops = atomic_xchg(&ev_fd->ops, 0);
-
- if (ops & BIT(IO_EVENTFD_OP_SIGNAL_BIT))
- eventfd_signal_mask(ev_fd->cq_ev_fd, 1, EPOLL_URING_WAKE);
-
- /* IO_EVENTFD_OP_FREE_BIT may not be set here depending on callback
- * ordering in a race but if references are 0 we know we have to free
- * it regardless.
- */
- if (atomic_dec_and_test(&ev_fd->refs)) {
- eventfd_ctx_put(ev_fd->cq_ev_fd);
- kfree(ev_fd);
- }
+ req->io_task_work.func = io_req_queue_iowq_tw;
+ io_req_task_work_add(req);
}
-static void io_eventfd_signal(struct io_ring_ctx *ctx)
+unsigned io_linked_nr(struct io_kiocb *req)
{
- struct io_ev_fd *ev_fd = NULL;
-
- rcu_read_lock();
- /*
- * rcu_dereference ctx->io_ev_fd once and use it for both for checking
- * and eventfd_signal
- */
- ev_fd = rcu_dereference(ctx->io_ev_fd);
-
- /*
- * Check again if ev_fd exists incase an io_eventfd_unregister call
- * completed between the NULL check of ctx->io_ev_fd at the start of
- * the function and rcu_read_lock.
- */
- if (unlikely(!ev_fd))
- goto out;
- if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
- goto out;
- if (ev_fd->eventfd_async && !io_wq_current_is_worker())
- goto out;
+ struct io_kiocb *tmp;
+ unsigned nr = 0;
- if (likely(eventfd_signal_allowed())) {
- eventfd_signal_mask(ev_fd->cq_ev_fd, 1, EPOLL_URING_WAKE);
- } else {
- atomic_inc(&ev_fd->refs);
- if (!atomic_fetch_or(BIT(IO_EVENTFD_OP_SIGNAL_BIT), &ev_fd->ops))
- call_rcu_hurry(&ev_fd->rcu, io_eventfd_ops);
- else
- atomic_dec(&ev_fd->refs);
- }
-
-out:
- rcu_read_unlock();
+ io_for_each_link(tmp, req)
+ nr++;
+ return nr;
}
-static void io_eventfd_flush_signal(struct io_ring_ctx *ctx)
+static __cold noinline void io_queue_deferred(struct io_ring_ctx *ctx)
{
- bool skip;
+ bool drain_seen = false, first = true;
- spin_lock(&ctx->completion_lock);
+ lockdep_assert_held(&ctx->uring_lock);
+ __io_req_caches_free(ctx);
- /*
- * Eventfd should only get triggered when at least one event has been
- * posted. Some applications rely on the eventfd notification count
- * only changing IFF a new CQE has been added to the CQ ring. There's
- * no depedency on 1:1 relationship between how many times this
- * function is called (and hence the eventfd count) and number of CQEs
- * posted to the CQ ring.
- */
- skip = ctx->cached_cq_tail == ctx->evfd_last_cq_tail;
- ctx->evfd_last_cq_tail = ctx->cached_cq_tail;
- spin_unlock(&ctx->completion_lock);
- if (skip)
- return;
+ while (!list_empty(&ctx->defer_list)) {
+ struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
+ struct io_defer_entry, list);
+
+ drain_seen |= de->req->flags & REQ_F_IO_DRAIN;
+ if ((drain_seen || first) && ctx->nr_req_allocated != ctx->nr_drained)
+ return;
- io_eventfd_signal(ctx);
+ list_del_init(&de->list);
+ ctx->nr_drained -= io_linked_nr(de->req);
+ io_req_task_queue(de->req);
+ kfree(de);
+ first = false;
+ }
}
void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
+ if (ctx->poll_activated)
+ io_poll_wq_wake(ctx);
if (ctx->off_timeout_used)
io_flush_timeouts(ctx);
- if (ctx->drain_active) {
- spin_lock(&ctx->completion_lock);
- io_queue_deferred(ctx);
- spin_unlock(&ctx->completion_lock);
- }
if (ctx->has_evfd)
- io_eventfd_flush_signal(ctx);
+ io_eventfd_signal(ctx, true);
}
static inline void __io_cq_lock(struct io_ring_ctx *ctx)
- __acquires(ctx->completion_lock)
{
- if (!ctx->task_complete)
+ if (!ctx->lockless_cq)
spin_lock(&ctx->completion_lock);
}
-static inline void __io_cq_unlock(struct io_ring_ctx *ctx)
-{
- if (!ctx->task_complete)
- spin_unlock(&ctx->completion_lock);
-}
-
static inline void io_cq_lock(struct io_ring_ctx *ctx)
__acquires(ctx->completion_lock)
{
spin_lock(&ctx->completion_lock);
}
-static inline void io_cq_unlock(struct io_ring_ctx *ctx)
- __releases(ctx->completion_lock)
-{
- spin_unlock(&ctx->completion_lock);
-}
-
-/* keep it inlined for io_submit_flush_completions() */
static inline void __io_cq_unlock_post(struct io_ring_ctx *ctx)
- __releases(ctx->completion_lock)
{
io_commit_cqring(ctx);
- __io_cq_unlock(ctx);
+ if (!ctx->task_complete) {
+ if (!ctx->lockless_cq)
+ spin_unlock(&ctx->completion_lock);
+ /* IOPOLL rings only need to wake up if it's also SQPOLL */
+ if (!ctx->syscall_iopoll)
+ io_cqring_wake(ctx);
+ }
io_commit_cqring_flush(ctx);
- io_cqring_wake(ctx);
}
-void io_cq_unlock_post(struct io_ring_ctx *ctx)
+static void io_cq_unlock_post(struct io_ring_ctx *ctx)
__releases(ctx->completion_lock)
{
io_commit_cqring(ctx);
spin_unlock(&ctx->completion_lock);
- io_commit_cqring_flush(ctx);
io_cqring_wake(ctx);
+ io_commit_cqring_flush(ctx);
}
-/* Returns true if there are no backlogged entries after the flush */
-static void io_cqring_overflow_kill(struct io_ring_ctx *ctx)
-{
- struct io_overflow_cqe *ocqe;
- LIST_HEAD(list);
-
- io_cq_lock(ctx);
- list_splice_init(&ctx->cq_overflow_list, &list);
- clear_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
- io_cq_unlock(ctx);
-
- while (!list_empty(&list)) {
- ocqe = list_first_entry(&list, struct io_overflow_cqe, list);
- list_del(&ocqe->list);
- kfree(ocqe);
- }
-}
-
-/* Returns true if there are no backlogged entries after the flush */
-static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx)
+static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool dying)
{
- size_t cqe_size = sizeof(struct io_uring_cqe);
+ lockdep_assert_held(&ctx->uring_lock);
- if (__io_cqring_events(ctx) == ctx->cq_entries)
+ /* don't abort if we're dying, entries must get freed */
+ if (!dying && __io_cqring_events(ctx) == ctx->cq_entries)
return;
- if (ctx->flags & IORING_SETUP_CQE32)
- cqe_size <<= 1;
-
io_cq_lock(ctx);
while (!list_empty(&ctx->cq_overflow_list)) {
- struct io_uring_cqe *cqe = io_get_cqe_overflow(ctx, true);
+ size_t cqe_size = sizeof(struct io_uring_cqe);
+ struct io_uring_cqe *cqe;
struct io_overflow_cqe *ocqe;
+ bool is_cqe32 = false;
- if (!cqe)
- break;
ocqe = list_first_entry(&ctx->cq_overflow_list,
struct io_overflow_cqe, list);
- memcpy(cqe, &ocqe->cqe, cqe_size);
+ if (ocqe->cqe.flags & IORING_CQE_F_32 ||
+ ctx->flags & IORING_SETUP_CQE32) {
+ is_cqe32 = true;
+ cqe_size <<= 1;
+ }
+ if (ctx->flags & IORING_SETUP_CQE32)
+ is_cqe32 = false;
+
+ if (!dying) {
+ if (!io_get_cqe_overflow(ctx, &cqe, true, is_cqe32))
+ break;
+ memcpy(cqe, &ocqe->cqe, cqe_size);
+ }
list_del(&ocqe->list);
kfree(ocqe);
+
+ /*
+ * For silly syzbot cases that deliberately overflow by huge
+ * amounts, check if we need to resched and drop and
+ * reacquire the locks if so. Nothing real would ever hit this.
+ * Ideally we'd have a non-posting unlock for this, but hard
+ * to care for a non-real case.
+ */
+ if (need_resched()) {
+ ctx->cqe_sentinel = ctx->cqe_cached;
+ io_cq_unlock_post(ctx);
+ mutex_unlock(&ctx->uring_lock);
+ cond_resched();
+ mutex_lock(&ctx->uring_lock);
+ io_cq_lock(ctx);
+ }
}
if (list_empty(&ctx->cq_overflow_list)) {
@@ -677,30 +636,32 @@ static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx)
io_cq_unlock_post(ctx);
}
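The overflow flush above also gained a cooperative yield: for absurdly long backlogs it drops the CQ lock and the uring_lock, reschedules, and picks them back up before continuing. A generic sketch of that drop/yield/reacquire pattern, with one pthread mutex standing in for the pair of kernel locks:

#include <pthread.h>
#include <sched.h>

/* drain_one() processes one backlog entry and returns nonzero while more
 * remain; should_yield() says whether it is time to give up the CPU.
 * When draining a potentially huge backlog, periodically drop the lock
 * and yield, then reacquire it and continue where we left off. */
void drain_backlog(pthread_mutex_t *lock, int (*drain_one)(void),
		   int (*should_yield)(void))
{
	pthread_mutex_lock(lock);
	while (drain_one()) {
		if (should_yield()) {
			pthread_mutex_unlock(lock);
			sched_yield();
			pthread_mutex_lock(lock);
		}
	}
	pthread_mutex_unlock(lock);
}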
-static void io_cqring_do_overflow_flush(struct io_ring_ctx *ctx)
+static void io_cqring_overflow_kill(struct io_ring_ctx *ctx)
{
- /* iopoll syncs against uring_lock, not completion_lock */
- if (ctx->flags & IORING_SETUP_IOPOLL)
- mutex_lock(&ctx->uring_lock);
- __io_cqring_overflow_flush(ctx);
- if (ctx->flags & IORING_SETUP_IOPOLL)
- mutex_unlock(&ctx->uring_lock);
+ if (ctx->rings)
+ __io_cqring_overflow_flush(ctx, true);
}
-static void io_cqring_overflow_flush(struct io_ring_ctx *ctx)
+static void io_cqring_do_overflow_flush(struct io_ring_ctx *ctx)
{
- if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq))
- io_cqring_do_overflow_flush(ctx);
+ mutex_lock(&ctx->uring_lock);
+ __io_cqring_overflow_flush(ctx, false);
+ mutex_unlock(&ctx->uring_lock);
}
-void __io_put_task(struct task_struct *task, int nr)
+/* must be called somewhat shortly after putting a request */
+static inline void io_put_task(struct io_kiocb *req)
{
- struct io_uring_task *tctx = task->io_uring;
+ struct io_uring_task *tctx = req->tctx;
- percpu_counter_sub(&tctx->inflight, nr);
- if (unlikely(atomic_read(&tctx->in_idle)))
- wake_up(&tctx->wait);
- put_task_struct_many(task, nr);
+ if (likely(tctx->task == current)) {
+ tctx->cached_refs++;
+ } else {
+ percpu_counter_sub(&tctx->inflight, 1);
+ if (unlikely(atomic_read(&tctx->in_cancel)))
+ wake_up(&tctx->wait);
+ put_task_struct(tctx->task);
+ }
}
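io_put_task() now distinguishes completions done by the owning task, which can simply top up the task-local cached_refs counter, from cross-task completions, which still hit the shared inflight counter and may need to wake a cancelling task. A hedged userspace sketch of that split:

#include <stdatomic.h>

struct tctx {
	void *task;
	int cached_refs;	/* only ever touched by the owning task */
	atomic_int inflight;
	atomic_bool in_cancel;
};

/* If the request is completed by its owning task, a plain task-local
 * increment is enough; a cross-task completion falls back to the shared
 * atomic and may need to wake a waiter that is cancelling. */
void put_task_ref(struct tctx *tctx, void *current_task,
		  void (*wake)(struct tctx *))
{
	if (tctx->task == current_task) {
		tctx->cached_refs++;
	} else {
		atomic_fetch_sub(&tctx->inflight, 1);
		if (atomic_load(&tctx->in_cancel))
			wake(tctx);
	}
}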
void io_task_refs_refill(struct io_uring_task *tctx)
@@ -712,7 +673,7 @@ void io_task_refs_refill(struct io_uring_task *tctx)
tctx->cached_refs += refill;
}
-static __cold void io_uring_drop_tctx_refs(struct task_struct *task)
+__cold void io_uring_drop_tctx_refs(struct task_struct *task)
{
struct io_uring_task *tctx = task->io_uring;
unsigned int refs = tctx->cached_refs;
@@ -724,27 +685,20 @@ static __cold void io_uring_drop_tctx_refs(struct task_struct *task)
}
}
-static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
- s32 res, u32 cflags, u64 extra1, u64 extra2)
+static __cold bool io_cqring_add_overflow(struct io_ring_ctx *ctx,
+ struct io_overflow_cqe *ocqe)
{
- struct io_overflow_cqe *ocqe;
- size_t ocq_size = sizeof(struct io_overflow_cqe);
- bool is_cqe32 = (ctx->flags & IORING_SETUP_CQE32);
-
lockdep_assert_held(&ctx->completion_lock);
- if (is_cqe32)
- ocq_size += sizeof(struct io_uring_cqe);
-
- ocqe = kmalloc(ocq_size, GFP_ATOMIC | __GFP_ACCOUNT);
- trace_io_uring_cqe_overflow(ctx, user_data, res, cflags, ocqe);
if (!ocqe) {
+ struct io_rings *r = ctx->rings;
+
/*
* If we're in ring overflow flush mode, or in task cancel mode,
* or cannot allocate an overflow entry, then we need to drop it
* on the floor.
*/
- io_account_cq_overflow(ctx);
+ WRITE_ONCE(r->cq_overflow, READ_ONCE(r->cq_overflow) + 1);
set_bit(IO_CHECK_CQ_DROPPED_BIT, &ctx->check_cq);
return false;
}
@@ -753,26 +707,55 @@ static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
atomic_or(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags);
}
- ocqe->cqe.user_data = user_data;
- ocqe->cqe.res = res;
- ocqe->cqe.flags = cflags;
- if (is_cqe32) {
- ocqe->cqe.big_cqe[0] = extra1;
- ocqe->cqe.big_cqe[1] = extra2;
- }
list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
return true;
}
-bool io_req_cqe_overflow(struct io_kiocb *req)
+static struct io_overflow_cqe *io_alloc_ocqe(struct io_ring_ctx *ctx,
+ struct io_cqe *cqe,
+ struct io_big_cqe *big_cqe, gfp_t gfp)
{
- if (!(req->flags & REQ_F_CQE32_INIT)) {
- req->extra1 = 0;
- req->extra2 = 0;
+ struct io_overflow_cqe *ocqe;
+ size_t ocq_size = sizeof(struct io_overflow_cqe);
+ bool is_cqe32 = false;
+
+ if (cqe->flags & IORING_CQE_F_32 || ctx->flags & IORING_SETUP_CQE32) {
+ is_cqe32 = true;
+ ocq_size += sizeof(struct io_uring_cqe);
+ }
+
+ ocqe = kzalloc(ocq_size, gfp | __GFP_ACCOUNT);
+ trace_io_uring_cqe_overflow(ctx, cqe->user_data, cqe->res, cqe->flags, ocqe);
+ if (ocqe) {
+ ocqe->cqe.user_data = cqe->user_data;
+ ocqe->cqe.res = cqe->res;
+ ocqe->cqe.flags = cqe->flags;
+ if (is_cqe32 && big_cqe) {
+ ocqe->cqe.big_cqe[0] = big_cqe->extra1;
+ ocqe->cqe.big_cqe[1] = big_cqe->extra2;
+ }
+ }
+ if (big_cqe)
+ big_cqe->extra1 = big_cqe->extra2 = 0;
+ return ocqe;
+}
+
+/*
+ * Fill an empty dummy CQE, in case alignment is off for posting a 32b CQE
+ * because the ring is a single 16b entry away from wrapping.
+ */
+static bool io_fill_nop_cqe(struct io_ring_ctx *ctx, unsigned int off)
+{
+ if (__io_cqring_events(ctx) < ctx->cq_entries) {
+ struct io_uring_cqe *cqe = &ctx->rings->cqes[off];
+
+ cqe->user_data = 0;
+ cqe->res = 0;
+ cqe->flags = IORING_CQE_F_SKIP;
+ ctx->cached_cq_tail++;
+ return true;
}
- return io_cqring_event_overflow(req->ctx, req->cqe.user_data,
- req->cqe.res, req->cqe.flags,
- req->extra1, req->extra2);
+ return false;
}
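io_fill_nop_cqe() keeps a 32-byte CQE contiguous on a mixed-size ring: if only one 16-byte slot remains before the ring wraps, that slot is burned with a skip entry so the big CQE can start at index 0. A minimal stand-alone model of the index math (ring size and flag names here are illustrative):

#include <stdbool.h>

struct cqe { unsigned long long user_data; int res; unsigned int flags; };

#define RING_ENTRIES 8u			/* illustrative, must be a power of two */
#define CQE_F_SKIP   (1u << 2)		/* stand-in for IORING_CQE_F_SKIP */

struct cq_ring { struct cqe cqes[RING_ENTRIES]; unsigned int tail; };

/* If a double-width (32B) completion is next but only one 16B slot remains
 * before the ring wraps, emit a skip entry so the big CQE starts at index 0
 * and stays contiguous. */
bool pad_before_wrap(struct cq_ring *r, bool big_cqe)
{
	unsigned int off = r->tail & (RING_ENTRIES - 1);

	if (big_cqe && off + 1 == RING_ENTRIES) {
		struct cqe *nop = &r->cqes[off];

		nop->user_data = 0;
		nop->res = 0;
		nop->flags = CQE_F_SKIP;
		r->tail++;
		return true;
	}
	return false;
}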
/*
@@ -780,7 +763,7 @@ bool io_req_cqe_overflow(struct io_kiocb *req)
* control dependency is enough as we're using WRITE_ONCE to
* fill the cq entry
*/
-struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx, bool overflow)
+bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow, bool cqe32)
{
struct io_rings *rings = ctx->rings;
unsigned int off = ctx->cached_cq_tail & (ctx->cq_entries - 1);
@@ -792,15 +775,25 @@ struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx, bool overflow)
* Force overflow the completion.
*/
if (!overflow && (ctx->check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT)))
- return NULL;
+ return false;
+
+ /*
+ * Post dummy CQE if a 32b CQE is needed and there's only room for a
+ * 16b CQE before the ring wraps.
+ */
+ if (cqe32 && off + 1 == ctx->cq_entries) {
+ if (!io_fill_nop_cqe(ctx, off))
+ return false;
+ off = 0;
+ }
/* userspace may cheat modifying the tail, be safe and do min */
queued = min(__io_cqring_events(ctx), ctx->cq_entries);
free = ctx->cq_entries - queued;
/* we need a contiguous range, limit based on the current array offset */
len = min(free, ctx->cq_entries - off);
- if (!len)
- return NULL;
+ if (len < (cqe32 + 1))
+ return false;
if (ctx->flags & IORING_SETUP_CQE32) {
off <<= 1;
@@ -809,211 +802,214 @@ struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx, bool overflow)
ctx->cqe_cached = &rings->cqes[off];
ctx->cqe_sentinel = ctx->cqe_cached + len;
+ return true;
+}
- ctx->cached_cq_tail++;
- ctx->cqe_cached++;
- if (ctx->flags & IORING_SETUP_CQE32)
- ctx->cqe_cached++;
- return &rings->cqes[off];
+static bool io_fill_cqe_aux32(struct io_ring_ctx *ctx,
+ struct io_uring_cqe src_cqe[2])
+{
+ struct io_uring_cqe *cqe;
+
+ if (WARN_ON_ONCE(!(ctx->flags & (IORING_SETUP_CQE32|IORING_SETUP_CQE_MIXED))))
+ return false;
+ if (unlikely(!io_get_cqe(ctx, &cqe, true)))
+ return false;
+
+ memcpy(cqe, src_cqe, 2 * sizeof(*cqe));
+ trace_io_uring_complete(ctx, NULL, cqe);
+ return true;
}
static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
u32 cflags)
{
+ bool cqe32 = cflags & IORING_CQE_F_32;
struct io_uring_cqe *cqe;
- ctx->cq_extra++;
-
- /*
- * If we can't get a cq entry, userspace overflowed the
- * submission (by quite a lot). Increment the overflow count in
- * the ring.
- */
- cqe = io_get_cqe(ctx);
- if (likely(cqe)) {
- trace_io_uring_complete(ctx, NULL, user_data, res, cflags, 0, 0);
-
+ if (likely(io_get_cqe(ctx, &cqe, cqe32))) {
WRITE_ONCE(cqe->user_data, user_data);
WRITE_ONCE(cqe->res, res);
WRITE_ONCE(cqe->flags, cflags);
- if (ctx->flags & IORING_SETUP_CQE32) {
+ if (cqe32) {
WRITE_ONCE(cqe->big_cqe[0], 0);
WRITE_ONCE(cqe->big_cqe[1], 0);
}
+
+ trace_io_uring_complete(ctx, NULL, cqe);
return true;
}
return false;
}
-static void __io_flush_post_cqes(struct io_ring_ctx *ctx)
- __must_hold(&ctx->uring_lock)
+static inline struct io_cqe io_init_cqe(u64 user_data, s32 res, u32 cflags)
{
- struct io_submit_state *state = &ctx->submit_state;
- unsigned int i;
+ return (struct io_cqe) { .user_data = user_data, .res = res, .flags = cflags };
+}
- lockdep_assert_held(&ctx->uring_lock);
- for (i = 0; i < state->cqes_count; i++) {
- struct io_uring_cqe *cqe = &state->cqes[i];
-
- if (!io_fill_cqe_aux(ctx, cqe->user_data, cqe->res, cqe->flags)) {
- if (ctx->task_complete) {
- spin_lock(&ctx->completion_lock);
- io_cqring_event_overflow(ctx, cqe->user_data,
- cqe->res, cqe->flags, 0, 0);
- spin_unlock(&ctx->completion_lock);
- } else {
- io_cqring_event_overflow(ctx, cqe->user_data,
- cqe->res, cqe->flags, 0, 0);
- }
- }
- }
- state->cqes_count = 0;
+static __cold void io_cqe_overflow(struct io_ring_ctx *ctx, struct io_cqe *cqe,
+ struct io_big_cqe *big_cqe)
+{
+ struct io_overflow_cqe *ocqe;
+
+ ocqe = io_alloc_ocqe(ctx, cqe, big_cqe, GFP_KERNEL);
+ spin_lock(&ctx->completion_lock);
+ io_cqring_add_overflow(ctx, ocqe);
+ spin_unlock(&ctx->completion_lock);
}
-static bool __io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
- bool allow_overflow)
+static __cold bool io_cqe_overflow_locked(struct io_ring_ctx *ctx,
+ struct io_cqe *cqe,
+ struct io_big_cqe *big_cqe)
+{
+ struct io_overflow_cqe *ocqe;
+
+ ocqe = io_alloc_ocqe(ctx, cqe, big_cqe, GFP_ATOMIC);
+ return io_cqring_add_overflow(ctx, ocqe);
+}
+
+bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags)
{
bool filled;
io_cq_lock(ctx);
filled = io_fill_cqe_aux(ctx, user_data, res, cflags);
- if (!filled && allow_overflow)
- filled = io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0);
+ if (unlikely(!filled)) {
+ struct io_cqe cqe = io_init_cqe(user_data, res, cflags);
+ filled = io_cqe_overflow_locked(ctx, &cqe, NULL);
+ }
io_cq_unlock_post(ctx);
return filled;
}
-bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags)
+/*
+ * Must be called from inline task_work so we know a flush will happen later,
+ * and obviously with ctx->uring_lock held (tw always has that).
+ */
+void io_add_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags)
{
- return __io_post_aux_cqe(ctx, user_data, res, cflags, true);
+ lockdep_assert_held(&ctx->uring_lock);
+ lockdep_assert(ctx->lockless_cq);
+
+ if (!io_fill_cqe_aux(ctx, user_data, res, cflags)) {
+ struct io_cqe cqe = io_init_cqe(user_data, res, cflags);
+
+ io_cqe_overflow(ctx, &cqe, NULL);
+ }
+ ctx->submit_state.cq_flush = true;
}
-bool io_aux_cqe(struct io_ring_ctx *ctx, bool defer, u64 user_data, s32 res, u32 cflags,
- bool allow_overflow)
+/*
+ * A helper for multishot requests posting additional CQEs.
+ * Should only be used from a task_work including IO_URING_F_MULTISHOT.
+ */
+bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags)
{
- struct io_uring_cqe *cqe;
- unsigned int length;
-
- if (!defer)
- return __io_post_aux_cqe(ctx, user_data, res, cflags, allow_overflow);
+ struct io_ring_ctx *ctx = req->ctx;
+ bool posted;
- length = ARRAY_SIZE(ctx->submit_state.cqes);
+ /*
+ * If multishot has already posted deferred completions, ensure that
+ * those are flushed first before posting this one. If not, CQEs
+ * could get reordered.
+ */
+ if (!wq_list_empty(&ctx->submit_state.compl_reqs))
+ __io_submit_flush_completions(ctx);
+ lockdep_assert(!io_wq_current_is_worker());
lockdep_assert_held(&ctx->uring_lock);
- if (ctx->submit_state.cqes_count == length) {
- __io_cq_lock(ctx);
- __io_flush_post_cqes(ctx);
- /* no need to flush - flush is deferred */
- __io_cq_unlock_post(ctx);
+ if (!ctx->lockless_cq) {
+ spin_lock(&ctx->completion_lock);
+ posted = io_fill_cqe_aux(ctx, req->cqe.user_data, res, cflags);
+ spin_unlock(&ctx->completion_lock);
+ } else {
+ posted = io_fill_cqe_aux(ctx, req->cqe.user_data, res, cflags);
}
- /* For defered completions this is not as strict as it is otherwise,
- * however it's main job is to prevent unbounded posted completions,
- * and in that it works just as well.
- */
- if (!allow_overflow && test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq))
- return false;
-
- cqe = &ctx->submit_state.cqes[ctx->submit_state.cqes_count++];
- cqe->user_data = user_data;
- cqe->res = res;
- cqe->flags = cflags;
- return true;
+ ctx->submit_state.cq_flush = true;
+ return posted;
}
-static void __io_req_complete_post(struct io_kiocb *req)
+/*
+ * A helper for multishot requests posting additional CQEs.
+ * Should only be used from a task_work including IO_URING_F_MULTISHOT.
+ */
+bool io_req_post_cqe32(struct io_kiocb *req, struct io_uring_cqe cqe[2])
{
struct io_ring_ctx *ctx = req->ctx;
+ bool posted;
- io_cq_lock(ctx);
- if (!(req->flags & REQ_F_CQE_SKIP))
- io_fill_cqe_req(ctx, req);
+ lockdep_assert(!io_wq_current_is_worker());
+ lockdep_assert_held(&ctx->uring_lock);
- /*
- * If we're the last reference to this request, add to our locked
- * free_list cache.
- */
- if (req_ref_put_and_test(req)) {
- if (req->flags & IO_REQ_LINK_FLAGS) {
- if (req->flags & IO_DISARM_MASK)
- io_disarm_next(req);
- if (req->link) {
- io_req_task_queue(req->link);
- req->link = NULL;
- }
- }
- io_req_put_rsrc(req);
- /*
- * Selected buffer deallocation in io_clean_op() assumes that
- * we don't hold ->completion_lock. Clean them here to avoid
- * deadlocks.
- */
- io_put_kbuf_comp(req);
- io_dismantle_req(req);
- io_put_task(req->task, 1);
- wq_list_add_head(&req->comp_list, &ctx->locked_free_list);
- ctx->locked_free_nr++;
+ cqe[0].user_data = req->cqe.user_data;
+ if (!ctx->lockless_cq) {
+ spin_lock(&ctx->completion_lock);
+ posted = io_fill_cqe_aux32(ctx, cqe);
+ spin_unlock(&ctx->completion_lock);
+ } else {
+ posted = io_fill_cqe_aux32(ctx, cqe);
}
- io_cq_unlock_post(ctx);
+
+ ctx->submit_state.cq_flush = true;
+ return posted;
}
-void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
+static void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
{
- if (req->ctx->task_complete && (issue_flags & IO_URING_F_IOWQ)) {
+ struct io_ring_ctx *ctx = req->ctx;
+ bool completed = true;
+
+ /*
+ * All execution paths but io-wq use the deferred completions by
+ * passing IO_URING_F_COMPLETE_DEFER and thus should not end up here.
+ */
+ if (WARN_ON_ONCE(!(issue_flags & IO_URING_F_IOWQ)))
+ return;
+
+ /*
+ * Handle special CQ sync cases via task_work. DEFER_TASKRUN requires
+ * the submitter task context, IOPOLL protects with uring_lock.
+ */
+ if (ctx->lockless_cq || (req->flags & REQ_F_REISSUE)) {
+defer_complete:
req->io_task_work.func = io_req_task_complete;
io_req_task_work_add(req);
- } else if (!(issue_flags & IO_URING_F_UNLOCKED) ||
- !(req->ctx->flags & IORING_SETUP_IOPOLL)) {
- __io_req_complete_post(req);
- } else {
- struct io_ring_ctx *ctx = req->ctx;
-
- mutex_lock(&ctx->uring_lock);
- __io_req_complete_post(req);
- mutex_unlock(&ctx->uring_lock);
+ return;
}
+
+ io_cq_lock(ctx);
+ if (!(req->flags & REQ_F_CQE_SKIP))
+ completed = io_fill_cqe_req(ctx, req);
+ io_cq_unlock_post(ctx);
+
+ if (!completed)
+ goto defer_complete;
+
+ /*
+ * We don't free the request here because we know it's called from
+ * io-wq only, which holds a reference, so it cannot be the last put.
+ */
+ req_ref_put(req);
}
void io_req_defer_failed(struct io_kiocb *req, s32 res)
__must_hold(&ctx->uring_lock)
{
- const struct io_op_def *def = &io_op_defs[req->opcode];
+ const struct io_cold_def *def = &io_cold_defs[req->opcode];
lockdep_assert_held(&req->ctx->uring_lock);
req_set_fail(req);
- io_req_set_res(req, res, io_put_kbuf(req, IO_URING_F_UNLOCKED));
+ io_req_set_res(req, res, io_put_kbuf(req, res, NULL));
if (def->fail)
def->fail(req);
io_req_complete_defer(req);
}
/*
- * Don't initialise the fields below on every allocation, but do that in
- * advance and keep them valid across allocations.
- */
-static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx)
-{
- req->ctx = ctx;
- req->link = NULL;
- req->async_data = NULL;
- /* not necessary, but safer to zero */
- req->cqe.res = 0;
-}
-
-static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
- struct io_submit_state *state)
-{
- spin_lock(&ctx->completion_lock);
- wq_list_splice(&ctx->locked_free_list, &state->free_list);
- ctx->locked_free_nr = 0;
- spin_unlock(&ctx->completion_lock);
-}
-
-/*
* A request might get retired back into the request caches even before opcode
* handlers and io_issue_sqe() are done with it, e.g. inline completion path.
* Because of that, io_alloc_req() should be called only under ->uring_lock
@@ -1022,20 +1018,9 @@ static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
__cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx)
__must_hold(&ctx->uring_lock)
{
- gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
+ gfp_t gfp = GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO;
void *reqs[IO_REQ_ALLOC_BATCH];
- int ret, i;
-
- /*
- * If we have more than a batch's worth of requests in our IRQ side
- * locked cache, grab the lock and move them over to our submission
- * side cache.
- */
- if (data_race(ctx->locked_free_nr) > IO_COMPL_BATCH) {
- io_flush_cached_locked_reqs(ctx, &ctx->submit_state);
- if (!io_req_cache_empty(ctx))
- return true;
- }
+ int ret;
ret = kmem_cache_alloc_bulk(req_cachep, gfp, ARRAY_SIZE(reqs), reqs);
@@ -1051,37 +1036,24 @@ __cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx)
}
percpu_ref_get_many(&ctx->refs, ret);
- for (i = 0; i < ret; i++) {
- struct io_kiocb *req = reqs[i];
+ ctx->nr_req_allocated += ret;
+
+ while (ret--) {
+ struct io_kiocb *req = reqs[ret];
- io_preinit_req(req, ctx);
io_req_add_to_cache(req, ctx);
}
return true;
}
-static inline void io_dismantle_req(struct io_kiocb *req)
-{
- unsigned int flags = req->flags;
-
- if (unlikely(flags & IO_REQ_CLEAN_FLAGS))
- io_clean_op(req);
- if (!(flags & REQ_F_FIXED_FILE))
- io_put_file(req->file);
-}
-
__cold void io_free_req(struct io_kiocb *req)
{
- struct io_ring_ctx *ctx = req->ctx;
-
- io_req_put_rsrc(req);
- io_dismantle_req(req);
- io_put_task(req->task, 1);
-
- spin_lock(&ctx->completion_lock);
- wq_list_add_head(&req->comp_list, &ctx->locked_free_list);
- ctx->locked_free_nr++;
- spin_unlock(&ctx->completion_lock);
+ /* refs were already put, restore them for io_req_task_complete() */
+ req->flags &= ~REQ_F_REFCOUNT;
+ /* we only want to free it, don't post CQEs */
+ req->flags |= REQ_F_CQE_SKIP;
+ req->io_task_work.func = io_req_task_complete;
+ io_req_task_work_add(req);
}
static void __io_req_find_next_prep(struct io_kiocb *req)
@@ -1110,165 +1082,200 @@ static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
return nxt;
}
-static void ctx_flush_and_put(struct io_ring_ctx *ctx, bool *locked)
+static void ctx_flush_and_put(struct io_ring_ctx *ctx, io_tw_token_t tw)
{
if (!ctx)
return;
if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
- if (*locked) {
- io_submit_flush_completions(ctx);
- mutex_unlock(&ctx->uring_lock);
- *locked = false;
- }
+
+ io_submit_flush_completions(ctx);
+ mutex_unlock(&ctx->uring_lock);
percpu_ref_put(&ctx->refs);
}
-static unsigned int handle_tw_list(struct llist_node *node,
- struct io_ring_ctx **ctx, bool *locked,
- struct llist_node *last)
+/*
+ * Run queued task_work, returning the number of entries processed in *count.
+ * If more entries than max_entries are available, stop processing once this
+ * is reached and return the rest of the list.
+ */
+struct llist_node *io_handle_tw_list(struct llist_node *node,
+ unsigned int *count,
+ unsigned int max_entries)
{
- unsigned int count = 0;
+ struct io_ring_ctx *ctx = NULL;
+ struct io_tw_state ts = { };
- while (node != last) {
+ do {
struct llist_node *next = node->next;
struct io_kiocb *req = container_of(node, struct io_kiocb,
io_task_work.node);
- prefetch(container_of(next, struct io_kiocb, io_task_work.node));
-
- if (req->ctx != *ctx) {
- ctx_flush_and_put(*ctx, locked);
- *ctx = req->ctx;
- /* if not contended, grab and improve batching */
- *locked = mutex_trylock(&(*ctx)->uring_lock);
- percpu_ref_get(&(*ctx)->refs);
+ if (req->ctx != ctx) {
+ ctx_flush_and_put(ctx, ts);
+ ctx = req->ctx;
+ mutex_lock(&ctx->uring_lock);
+ percpu_ref_get(&ctx->refs);
+ ts.cancel = io_should_terminate_tw(ctx);
}
- req->io_task_work.func(req, locked);
+ INDIRECT_CALL_2(req->io_task_work.func,
+ io_poll_task_func, io_req_rw_complete,
+ (struct io_tw_req){req}, ts);
node = next;
- count++;
- }
+ (*count)++;
+ if (unlikely(need_resched())) {
+ ctx_flush_and_put(ctx, ts);
+ ctx = NULL;
+ cond_resched();
+ }
+ } while (node && *count < max_entries);
- return count;
+ ctx_flush_and_put(ctx, ts);
+ return node;
}
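io_handle_tw_list() batches task_work by ring: the uring_lock is only dropped and completions only flushed when the next request belongs to a different ctx, or when a reschedule is due. The batching shape, reduced to a plain linked-list walk:

#include <stddef.h>

struct item { struct item *next; void *ctx; void (*fn)(struct item *); };

/* Walk the list and only "flush" (in the kernel: drop the uring_lock and
 * post completions) when the owning context changes, so consecutive items
 * from the same ring are handled under one lock acquisition. */
unsigned int run_list(struct item *node, void (*flush)(void *ctx))
{
	void *ctx = NULL;
	unsigned int count = 0;

	while (node) {
		struct item *next = node->next;

		if (node->ctx != ctx) {
			if (ctx)
				flush(ctx);
			ctx = node->ctx;
		}
		node->fn(node);
		node = next;
		count++;
	}
	if (ctx)
		flush(ctx);
	return count;
}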
-/**
- * io_llist_xchg - swap all entries in a lock-less list
- * @head: the head of lock-less list to delete all entries
- * @new: new entry as the head of the list
- *
- * If list is empty, return NULL, otherwise, return the pointer to the first entry.
- * The order of entries returned is from the newest to the oldest added one.
- */
-static inline struct llist_node *io_llist_xchg(struct llist_head *head,
- struct llist_node *new)
+static __cold void __io_fallback_tw(struct llist_node *node, bool sync)
{
- return xchg(&head->first, new);
-}
+ struct io_ring_ctx *last_ctx = NULL;
+ struct io_kiocb *req;
-/**
- * io_llist_cmpxchg - possibly swap all entries in a lock-less list
- * @head: the head of lock-less list to delete all entries
- * @old: expected old value of the first entry of the list
- * @new: new entry as the head of the list
- *
- * perform a cmpxchg on the first entry of the list.
- */
+ while (node) {
+ req = container_of(node, struct io_kiocb, io_task_work.node);
+ node = node->next;
+ if (last_ctx != req->ctx) {
+ if (last_ctx) {
+ if (sync)
+ flush_delayed_work(&last_ctx->fallback_work);
+ percpu_ref_put(&last_ctx->refs);
+ }
+ last_ctx = req->ctx;
+ percpu_ref_get(&last_ctx->refs);
+ }
+ if (llist_add(&req->io_task_work.node, &last_ctx->fallback_llist))
+ schedule_delayed_work(&last_ctx->fallback_work, 1);
+ }
-static inline struct llist_node *io_llist_cmpxchg(struct llist_head *head,
- struct llist_node *old,
- struct llist_node *new)
+ if (last_ctx) {
+ if (sync)
+ flush_delayed_work(&last_ctx->fallback_work);
+ percpu_ref_put(&last_ctx->refs);
+ }
+}
+
+static void io_fallback_tw(struct io_uring_task *tctx, bool sync)
{
- return cmpxchg(&head->first, old, new);
+ struct llist_node *node = llist_del_all(&tctx->task_list);
+
+ __io_fallback_tw(node, sync);
}
-void tctx_task_work(struct callback_head *cb)
+struct llist_node *tctx_task_work_run(struct io_uring_task *tctx,
+ unsigned int max_entries,
+ unsigned int *count)
{
- bool uring_locked = false;
- struct io_ring_ctx *ctx = NULL;
- struct io_uring_task *tctx = container_of(cb, struct io_uring_task,
- task_work);
- struct llist_node fake = {};
struct llist_node *node;
- unsigned int loops = 1;
- unsigned int count;
-
- if (unlikely(current->flags & PF_EXITING)) {
- io_fallback_tw(tctx);
- return;
- }
- node = io_llist_xchg(&tctx->task_list, &fake);
- count = handle_tw_list(node, &ctx, &uring_locked, NULL);
- node = io_llist_cmpxchg(&tctx->task_list, &fake, NULL);
- while (node != &fake) {
- loops++;
- node = io_llist_xchg(&tctx->task_list, &fake);
- count += handle_tw_list(node, &ctx, &uring_locked, &fake);
- node = io_llist_cmpxchg(&tctx->task_list, &fake, NULL);
+ node = llist_del_all(&tctx->task_list);
+ if (node) {
+ node = llist_reverse_order(node);
+ node = io_handle_tw_list(node, count, max_entries);
}
- ctx_flush_and_put(ctx, &uring_locked);
-
- /* relaxed read is enough as only the task itself sets ->in_idle */
- if (unlikely(atomic_read(&tctx->in_idle)))
+ /* relaxed read is enough as only the task itself sets ->in_cancel */
+ if (unlikely(atomic_read(&tctx->in_cancel)))
io_uring_drop_tctx_refs(current);
- trace_io_uring_task_work_run(tctx, count, loops);
+ trace_io_uring_task_work_run(tctx, *count);
+ return node;
}
-static __cold void io_fallback_tw(struct io_uring_task *tctx)
+void tctx_task_work(struct callback_head *cb)
{
- struct llist_node *node = llist_del_all(&tctx->task_list);
- struct io_kiocb *req;
+ struct io_uring_task *tctx;
+ struct llist_node *ret;
+ unsigned int count = 0;
- while (node) {
- req = container_of(node, struct io_kiocb, io_task_work.node);
- node = node->next;
- if (llist_add(&req->io_task_work.node,
- &req->ctx->fallback_llist))
- schedule_delayed_work(&req->ctx->fallback_work, 1);
- }
+ tctx = container_of(cb, struct io_uring_task, task_work);
+ ret = tctx_task_work_run(tctx, UINT_MAX, &count);
+ /* can't happen */
+ WARN_ON_ONCE(ret);
}
-static void io_req_local_work_add(struct io_kiocb *req)
+static void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
{
struct io_ring_ctx *ctx = req->ctx;
+ unsigned nr_wait, nr_tw, nr_tw_prev;
+ struct llist_node *head;
- percpu_ref_get(&ctx->refs);
+ /* See comment above IO_CQ_WAKE_INIT */
+ BUILD_BUG_ON(IO_CQ_WAKE_FORCE <= IORING_MAX_CQ_ENTRIES);
- if (!llist_add(&req->io_task_work.node, &ctx->work_llist)) {
- percpu_ref_put(&ctx->refs);
- return;
- }
- /* need it for the following io_cqring_wake() */
- smp_mb__after_atomic();
+ /*
+ * We don't know how many requests there are in the link and whether
+ * they can even be queued lazily, fall back to non-lazy.
+ */
+ if (req->flags & IO_REQ_LINK_FLAGS)
+ flags &= ~IOU_F_TWQ_LAZY_WAKE;
- if (unlikely(atomic_read(&req->task->io_uring->in_idle))) {
- io_move_task_work_from_local(ctx);
- percpu_ref_put(&ctx->refs);
- return;
- }
+ guard(rcu)();
- if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
- atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
+ head = READ_ONCE(ctx->work_llist.first);
+ do {
+ nr_tw_prev = 0;
+ if (head) {
+ struct io_kiocb *first_req = container_of(head,
+ struct io_kiocb,
+ io_task_work.node);
+ /*
+ * Might be executed at any moment, rely on
+ * SLAB_TYPESAFE_BY_RCU to keep it alive.
+ */
+ nr_tw_prev = READ_ONCE(first_req->nr_tw);
+ }
- if (ctx->has_evfd)
- io_eventfd_signal(ctx);
- __io_cqring_wake(ctx);
- percpu_ref_put(&ctx->refs);
+ /*
+ * Theoretically, it can overflow, but that's fine as one of the
+ * previous adds should've tried to wake the task.
+ */
+ nr_tw = nr_tw_prev + 1;
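+ /*
+ * IO_CQ_WAKE_FORCE is bigger than any waiter-provided ->cq_wait_nr
+ * (see the BUILD_BUG_ON above), so a non-lazy add always passes the
+ * wakeup check at the end of this function once a waiter is armed.
+ */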
+ if (!(flags & IOU_F_TWQ_LAZY_WAKE))
+ nr_tw = IO_CQ_WAKE_FORCE;
+
+ req->nr_tw = nr_tw;
+ req->io_task_work.node.next = head;
+ } while (!try_cmpxchg(&ctx->work_llist.first, &head,
+ &req->io_task_work.node));
+
+ /*
+ * cmpxchg implies a full barrier, which pairs with the barrier
+ * in set_current_state() on the io_cqring_wait() side. It's used
+ * to ensure that either we see updated ->cq_wait_nr, or waiters
+ * going to sleep will observe the work added to the list, which
+ * is similar to the wait/wake task state sync.
+ */
+
+ if (!head) {
+ if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
+ atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
+ if (ctx->has_evfd)
+ io_eventfd_signal(ctx, false);
+ }
+
+ nr_wait = atomic_read(&ctx->cq_wait_nr);
+ /* not enough or no one is waiting */
+ if (nr_tw < nr_wait)
+ return;
+ /* the previous add has already woken it up */
+ if (nr_tw_prev >= nr_wait)
+ return;
+ wake_up_state(ctx->submitter_task, TASK_INTERRUPTIBLE);
}
-void __io_req_task_work_add(struct io_kiocb *req, bool allow_local)
+static void io_req_normal_work_add(struct io_kiocb *req)
{
- struct io_uring_task *tctx = req->task->io_uring;
+ struct io_uring_task *tctx = req->tctx;
struct io_ring_ctx *ctx = req->ctx;
- if (allow_local && ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
- io_req_local_work_add(req);
- return;
- }
-
/* task_work already pending, we're done */
if (!llist_add(&req->io_task_work.node, &tctx->task_list))
return;
@@ -1276,99 +1283,155 @@ void __io_req_task_work_add(struct io_kiocb *req, bool allow_local)
if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
- if (likely(!task_work_add(req->task, &tctx->task_work, ctx->notify_method)))
+ /* SQPOLL doesn't need the task_work added, it'll run it itself */
+ if (ctx->flags & IORING_SETUP_SQPOLL) {
+ __set_notify_signal(tctx->task);
+ return;
+ }
+
+ if (likely(!task_work_add(tctx->task, &tctx->task_work, ctx->notify_method)))
return;
- io_fallback_tw(tctx);
+ io_fallback_tw(tctx, false);
}
-static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
+void __io_req_task_work_add(struct io_kiocb *req, unsigned flags)
{
- struct llist_node *node;
+ if (req->ctx->flags & IORING_SETUP_DEFER_TASKRUN)
+ io_req_local_work_add(req, flags);
+ else
+ io_req_normal_work_add(req);
+}
- node = llist_del_all(&ctx->work_llist);
- while (node) {
- struct io_kiocb *req = container_of(node, struct io_kiocb,
- io_task_work.node);
+void io_req_task_work_add_remote(struct io_kiocb *req, unsigned flags)
+{
+ if (WARN_ON_ONCE(!(req->ctx->flags & IORING_SETUP_DEFER_TASKRUN)))
+ return;
+ __io_req_task_work_add(req, flags);
+}
- node = node->next;
- __io_req_task_work_add(req, false);
- }
+static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
+{
+ struct llist_node *node = llist_del_all(&ctx->work_llist);
+
+ __io_fallback_tw(node, false);
+ node = llist_del_all(&ctx->retry_llist);
+ __io_fallback_tw(node, false);
}
-int __io_run_local_work(struct io_ring_ctx *ctx, bool *locked)
+static bool io_run_local_work_continue(struct io_ring_ctx *ctx, int events,
+ int min_events)
{
- struct llist_node *node;
- struct llist_node fake;
- struct llist_node *current_final = NULL;
- int ret;
- unsigned int loops = 1;
+ if (!io_local_work_pending(ctx))
+ return false;
+ if (events < min_events)
+ return true;
+ if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
+ atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
+ return false;
+}
- if (unlikely(ctx->submitter_task != current))
- return -EEXIST;
+static int __io_run_local_work_loop(struct llist_node **node,
+ io_tw_token_t tw,
+ int events)
+{
+ int ret = 0;
- node = io_llist_xchg(&ctx->work_llist, &fake);
- ret = 0;
-again:
- while (node != current_final) {
- struct llist_node *next = node->next;
- struct io_kiocb *req = container_of(node, struct io_kiocb,
+ while (*node) {
+ struct llist_node *next = (*node)->next;
+ struct io_kiocb *req = container_of(*node, struct io_kiocb,
io_task_work.node);
- prefetch(container_of(next, struct io_kiocb, io_task_work.node));
- req->io_task_work.func(req, locked);
- ret++;
- node = next;
+ INDIRECT_CALL_2(req->io_task_work.func,
+ io_poll_task_func, io_req_rw_complete,
+ (struct io_tw_req){req}, tw);
+ *node = next;
+ if (++ret >= events)
+ break;
}
+ return ret;
+}
+
+static int __io_run_local_work(struct io_ring_ctx *ctx, io_tw_token_t tw,
+ int min_events, int max_events)
+{
+ struct llist_node *node;
+ unsigned int loops = 0;
+ int ret = 0;
+
+ if (WARN_ON_ONCE(ctx->submitter_task != current))
+ return -EEXIST;
if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
+again:
+ tw.cancel = io_should_terminate_tw(ctx);
+ min_events -= ret;
+ ret = __io_run_local_work_loop(&ctx->retry_llist.first, tw, max_events);
+ if (ctx->retry_llist.first)
+ goto retry_done;
+
+ /*
+ * llists are in reverse order, flip it back the right way before
+ * running the pending items.
+ */
+ node = llist_reverse_order(llist_del_all(&ctx->work_llist));
+ ret += __io_run_local_work_loop(&node, tw, max_events - ret);
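+ /* stash whatever didn't fit in the budget, it's retried first next time */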
+ ctx->retry_llist.first = node;
+ loops++;
- node = io_llist_cmpxchg(&ctx->work_llist, &fake, NULL);
- if (node != &fake) {
- loops++;
- current_final = &fake;
- node = io_llist_xchg(&ctx->work_llist, &fake);
+ if (io_run_local_work_continue(ctx, ret, min_events))
+ goto again;
+retry_done:
+ io_submit_flush_completions(ctx);
+ if (io_run_local_work_continue(ctx, ret, min_events))
goto again;
- }
- if (*locked)
- io_submit_flush_completions(ctx);
trace_io_uring_local_work_run(ctx, ret, loops);
return ret;
-
}
-int io_run_local_work(struct io_ring_ctx *ctx)
+static inline int io_run_local_work_locked(struct io_ring_ctx *ctx,
+ int min_events)
{
- bool locked;
- int ret;
+ struct io_tw_state ts = {};
- if (llist_empty(&ctx->work_llist))
+ if (!io_local_work_pending(ctx))
return 0;
+ return __io_run_local_work(ctx, ts, min_events,
+ max(IO_LOCAL_TW_DEFAULT_MAX, min_events));
+}
- __set_current_state(TASK_RUNNING);
- locked = mutex_trylock(&ctx->uring_lock);
- ret = __io_run_local_work(ctx, &locked);
- if (locked)
- mutex_unlock(&ctx->uring_lock);
+int io_run_local_work(struct io_ring_ctx *ctx, int min_events, int max_events)
+{
+ struct io_tw_state ts = {};
+ int ret;
+ mutex_lock(&ctx->uring_lock);
+ ret = __io_run_local_work(ctx, ts, min_events, max_events);
+ mutex_unlock(&ctx->uring_lock);
return ret;
}
-static void io_req_task_cancel(struct io_kiocb *req, bool *locked)
+static void io_req_task_cancel(struct io_tw_req tw_req, io_tw_token_t tw)
{
- io_tw_lock(req->ctx, locked);
+ struct io_kiocb *req = tw_req.req;
+
+ io_tw_lock(req->ctx, tw);
io_req_defer_failed(req, req->cqe.res);
}
-void io_req_task_submit(struct io_kiocb *req, bool *locked)
+void io_req_task_submit(struct io_tw_req tw_req, io_tw_token_t tw)
{
- io_tw_lock(req->ctx, locked);
- /* req->task == current here, checking PF_EXITING is safe */
- if (likely(!(req->task->flags & PF_EXITING)))
- io_queue_sqe(req);
- else
+ struct io_kiocb *req = tw_req.req;
+ struct io_ring_ctx *ctx = req->ctx;
+
+ io_tw_lock(ctx, tw);
+ if (unlikely(tw.cancel))
io_req_defer_failed(req, -EFAULT);
+ else if (req->flags & REQ_F_FORCE_ASYNC)
+ io_queue_iowq(req);
+ else
+ io_queue_sqe(req, 0);
}
void io_req_task_queue_fail(struct io_kiocb *req, int ret)
@@ -1392,17 +1455,31 @@ void io_queue_next(struct io_kiocb *req)
io_req_task_queue(nxt);
}
-void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node)
- __must_hold(&ctx->uring_lock)
+static inline void io_req_put_rsrc_nodes(struct io_kiocb *req)
{
- struct task_struct *task = NULL;
- int task_refs = 0;
+ if (req->file_node) {
+ io_put_rsrc_node(req->ctx, req->file_node);
+ req->file_node = NULL;
+ }
+ if (req->flags & REQ_F_BUF_NODE)
+ io_put_rsrc_node(req->ctx, req->buf_node);
+}
+static void io_free_batch_list(struct io_ring_ctx *ctx,
+ struct io_wq_work_node *node)
+ __must_hold(&ctx->uring_lock)
+{
do {
struct io_kiocb *req = container_of(node, struct io_kiocb,
comp_list);
if (unlikely(req->flags & IO_REQ_CLEAN_SLOW_FLAGS)) {
+ if (req->flags & REQ_F_REISSUE) {
+ node = req->comp_list.next;
+ req->flags &= ~REQ_F_REISSUE;
+ io_queue_iowq(req);
+ continue;
+ }
if (req->flags & REQ_F_REFCOUNT) {
node = req->comp_list.next;
if (!req_ref_put_and_test(req))
@@ -1413,8 +1490,7 @@ void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node)
if (apoll->double_poll)
kfree(apoll->double_poll);
- if (!io_alloc_cache_put(&ctx->apoll_cache, &apoll->cache))
- kfree(apoll);
+ io_cache_free(&ctx->apoll_cache, apoll);
req->flags &= ~REQ_F_POLLED;
}
if (req->flags & IO_REQ_LINK_FLAGS)
@@ -1422,73 +1498,50 @@ void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node)
if (unlikely(req->flags & IO_REQ_CLEAN_FLAGS))
io_clean_op(req);
}
- if (!(req->flags & REQ_F_FIXED_FILE))
- io_put_file(req->file);
+ io_put_file(req);
+ io_req_put_rsrc_nodes(req);
+ io_put_task(req);
- io_req_put_rsrc_locked(req, ctx);
-
- if (req->task != task) {
- if (task)
- io_put_task(task, task_refs);
- task = req->task;
- task_refs = 0;
- }
- task_refs++;
node = req->comp_list.next;
io_req_add_to_cache(req, ctx);
} while (node);
-
- if (task)
- io_put_task(task, task_refs);
}
-static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
+void __io_submit_flush_completions(struct io_ring_ctx *ctx)
__must_hold(&ctx->uring_lock)
{
- struct io_wq_work_node *node, *prev;
struct io_submit_state *state = &ctx->submit_state;
+ struct io_wq_work_node *node;
__io_cq_lock(ctx);
- /* must come first to preserve CQE ordering in failure cases */
- if (state->cqes_count)
- __io_flush_post_cqes(ctx);
- wq_list_for_each(node, prev, &state->compl_reqs) {
+ __wq_list_for_each(node, &state->compl_reqs) {
struct io_kiocb *req = container_of(node, struct io_kiocb,
comp_list);
- if (!(req->flags & REQ_F_CQE_SKIP) &&
- unlikely(!__io_fill_cqe_req(ctx, req))) {
- if (ctx->task_complete) {
- spin_lock(&ctx->completion_lock);
- io_req_cqe_overflow(req);
- spin_unlock(&ctx->completion_lock);
- } else {
- io_req_cqe_overflow(req);
- }
+ /*
+ * Requests marked with REQ_F_REISSUE should not post a CQE, they
+ * will go through the io-wq retry machinery and post one
+ * later.
+ */
+ if (!(req->flags & (REQ_F_CQE_SKIP | REQ_F_REISSUE)) &&
+ unlikely(!io_fill_cqe_req(ctx, req))) {
+ if (ctx->lockless_cq)
+ io_cqe_overflow(ctx, &req->cqe, &req->big_cqe);
+ else
+ io_cqe_overflow_locked(ctx, &req->cqe, &req->big_cqe);
}
}
__io_cq_unlock_post(ctx);
- if (!wq_list_empty(&ctx->submit_state.compl_reqs)) {
+ if (!wq_list_empty(&state->compl_reqs)) {
io_free_batch_list(ctx, state->compl_reqs.first);
INIT_WQ_LIST(&state->compl_reqs);
}
-}
-/*
- * Drop reference to request, return next in chain (if there is one) if this
- * was the last reference to this request.
- */
-static inline struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
-{
- struct io_kiocb *nxt = NULL;
+ if (unlikely(ctx->drain_active))
+ io_queue_deferred(ctx);
- if (req_ref_put_and_test(req)) {
- if (unlikely(req->flags & IO_REQ_LINK_FLAGS))
- nxt = io_req_find_next(req);
- io_free_req(req);
- }
- return nxt;
+ ctx->submit_state.cq_flush = false;
}
static unsigned io_cqring_events(struct io_ring_ctx *ctx)
@@ -1502,7 +1555,7 @@ static unsigned io_cqring_events(struct io_ring_ctx *ctx)
* We can't just wait for polled events to come to us, we have to actively
* find and complete them.
*/
-static __cold void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
+__cold void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
{
if (!(ctx->flags & IORING_SETUP_IOPOLL))
return;
@@ -1524,21 +1577,27 @@ static __cold void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
}
}
mutex_unlock(&ctx->uring_lock);
+
+ if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
+ io_move_task_work_from_local(ctx);
}
-static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
+static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned int min_events)
{
unsigned int nr_events = 0;
- int ret = 0;
unsigned long check_cq;
+ min_events = min(min_events, ctx->cq_entries);
+
+ lockdep_assert_held(&ctx->uring_lock);
+
if (!io_allowed_run_tw(ctx))
return -EEXIST;
check_cq = READ_ONCE(ctx->check_cq);
if (unlikely(check_cq)) {
if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
- __io_cqring_overflow_flush(ctx);
+ __io_cqring_overflow_flush(ctx, false);
/*
* Similarly do not spin if we have not informed the user of any
* dropped CQE.
@@ -1555,6 +1614,8 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
return 0;
do {
+ int ret = 0;
+
/*
* If a submit got punted to a workqueue, we can have the
* application entering polling for a command before it gets
@@ -1569,7 +1630,7 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
io_task_work_pending(ctx)) {
u32 tail = ctx->cached_cq_tail;
- (void) io_run_local_work_locked(ctx);
+ (void) io_run_local_work_locked(ctx, min_events);
if (task_work_pending(current) ||
wq_list_empty(&ctx->iopoll_list)) {
@@ -1582,22 +1643,24 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
wq_list_empty(&ctx->iopoll_list))
break;
}
- ret = io_do_iopoll(ctx, !min);
- if (ret < 0)
+ ret = io_do_iopoll(ctx, !min_events);
+ if (unlikely(ret < 0))
+ return ret;
+
+ if (task_sigpending(current))
+ return -EINTR;
+ if (need_resched())
break;
+
nr_events += ret;
- ret = 0;
- } while (nr_events < min && !need_resched());
+ } while (nr_events < min_events);
- return ret;
+ return 0;
}
-void io_req_task_complete(struct io_kiocb *req, bool *locked)
+void io_req_task_complete(struct io_tw_req tw_req, io_tw_token_t tw)
{
- if (*locked)
- io_req_complete_defer(req);
- else
- io_req_complete_post(req, IO_URING_F_UNLOCKED);
+ io_req_complete_defer(tw_req.req);
}
/*
@@ -1655,174 +1718,47 @@ static void io_iopoll_req_issued(struct io_kiocb *req, unsigned int issue_flags)
}
}
-static bool io_bdev_nowait(struct block_device *bdev)
-{
- return !bdev || bdev_nowait(bdev);
-}
-
-/*
- * If we tracked the file through the SCM inflight mechanism, we could support
- * any file. For now, just ensure that anything potentially problematic is done
- * inline.
- */
-static bool __io_file_supports_nowait(struct file *file, umode_t mode)
+io_req_flags_t io_file_get_flags(struct file *file)
{
- if (S_ISBLK(mode)) {
- if (IS_ENABLED(CONFIG_BLOCK) &&
- io_bdev_nowait(I_BDEV(file->f_mapping->host)))
- return true;
- return false;
- }
- if (S_ISSOCK(mode))
- return true;
- if (S_ISREG(mode)) {
- if (IS_ENABLED(CONFIG_BLOCK) &&
- io_bdev_nowait(file->f_inode->i_sb->s_bdev) &&
- !io_is_uring_fops(file))
- return true;
- return false;
- }
+ io_req_flags_t res = 0;
- /* any ->read/write should understand O_NONBLOCK */
- if (file->f_flags & O_NONBLOCK)
- return true;
- return file->f_mode & FMODE_NOWAIT;
-}
-
-/*
- * If we tracked the file through the SCM inflight mechanism, we could support
- * any file. For now, just ensure that anything potentially problematic is done
- * inline.
- */
-unsigned int io_file_get_flags(struct file *file)
-{
- umode_t mode = file_inode(file)->i_mode;
- unsigned int res = 0;
+ BUILD_BUG_ON(REQ_F_ISREG_BIT != REQ_F_SUPPORT_NOWAIT_BIT + 1);
- if (S_ISREG(mode))
- res |= FFS_ISREG;
- if (__io_file_supports_nowait(file, mode))
- res |= FFS_NOWAIT;
+ if (S_ISREG(file_inode(file)->i_mode))
+ res |= REQ_F_ISREG;
+ if ((file->f_flags & O_NONBLOCK) || (file->f_mode & FMODE_NOWAIT))
+ res |= REQ_F_SUPPORT_NOWAIT;
return res;
}
-bool io_alloc_async_data(struct io_kiocb *req)
-{
- WARN_ON_ONCE(!io_op_defs[req->opcode].async_size);
- req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL);
- if (req->async_data) {
- req->flags |= REQ_F_ASYNC_DATA;
- return false;
- }
- return true;
-}
-
-int io_req_prep_async(struct io_kiocb *req)
-{
- const struct io_op_def *def = &io_op_defs[req->opcode];
-
- /* assign early for deferred execution for non-fixed file */
- if (def->needs_file && !(req->flags & REQ_F_FIXED_FILE))
- req->file = io_file_get_normal(req, req->cqe.fd);
- if (!def->prep_async)
- return 0;
- if (WARN_ON_ONCE(req_has_async_data(req)))
- return -EFAULT;
- if (!io_op_defs[req->opcode].manual_alloc) {
- if (io_alloc_async_data(req))
- return -EAGAIN;
- }
- return def->prep_async(req);
-}
-
-static u32 io_get_sequence(struct io_kiocb *req)
-{
- u32 seq = req->ctx->cached_sq_head;
- struct io_kiocb *cur;
-
- /* need original cached_sq_head, but it was increased for each req */
- io_for_each_link(cur, req)
- seq--;
- return seq;
-}
-
static __cold void io_drain_req(struct io_kiocb *req)
__must_hold(&ctx->uring_lock)
{
struct io_ring_ctx *ctx = req->ctx;
+ bool drain = req->flags & IOSQE_IO_DRAIN;
struct io_defer_entry *de;
- int ret;
- u32 seq = io_get_sequence(req);
- /* Still need defer if there is pending req in defer list. */
- spin_lock(&ctx->completion_lock);
- if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list)) {
- spin_unlock(&ctx->completion_lock);
-queue:
- ctx->drain_active = false;
- io_req_task_queue(req);
- return;
- }
- spin_unlock(&ctx->completion_lock);
-
- io_prep_async_link(req);
- de = kmalloc(sizeof(*de), GFP_KERNEL);
+ de = kmalloc(sizeof(*de), GFP_KERNEL_ACCOUNT);
if (!de) {
- ret = -ENOMEM;
- io_req_defer_failed(req, ret);
+ io_req_defer_failed(req, -ENOMEM);
return;
}
- spin_lock(&ctx->completion_lock);
- if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
- spin_unlock(&ctx->completion_lock);
- kfree(de);
- goto queue;
- }
-
+ io_prep_async_link(req);
trace_io_uring_defer(req);
de->req = req;
- de->seq = seq;
- list_add_tail(&de->list, &ctx->defer_list);
- spin_unlock(&ctx->completion_lock);
-}
-
-static void io_clean_op(struct io_kiocb *req)
-{
- if (req->flags & REQ_F_BUFFER_SELECTED) {
- spin_lock(&req->ctx->completion_lock);
- io_put_kbuf_comp(req);
- spin_unlock(&req->ctx->completion_lock);
- }
- if (req->flags & REQ_F_NEED_CLEANUP) {
- const struct io_op_def *def = &io_op_defs[req->opcode];
-
- if (def->cleanup)
- def->cleanup(req);
- }
- if ((req->flags & REQ_F_POLLED) && req->apoll) {
- kfree(req->apoll->double_poll);
- kfree(req->apoll);
- req->apoll = NULL;
- }
- if (req->flags & REQ_F_INFLIGHT) {
- struct io_uring_task *tctx = req->task->io_uring;
-
- atomic_dec(&tctx->inflight_tracked);
- }
- if (req->flags & REQ_F_CREDS)
- put_cred(req->creds);
- if (req->flags & REQ_F_ASYNC_DATA) {
- kfree(req->async_data);
- req->async_data = NULL;
- }
- req->flags &= ~IO_REQ_CLEAN_FLAGS;
+ ctx->nr_drained += io_linked_nr(req);
+ list_add_tail(&de->list, &ctx->defer_list);
+ io_queue_deferred(ctx);
+ if (!drain && list_empty(&ctx->defer_list))
+ ctx->drain_active = false;
}
-static bool io_assign_file(struct io_kiocb *req, unsigned int issue_flags)
+static bool io_assign_file(struct io_kiocb *req, const struct io_issue_def *def,
+ unsigned int issue_flags)
{
- if (req->file || !io_op_defs[req->opcode].needs_file)
+ if (req->file || !def->needs_file)
return true;
if (req->flags & REQ_F_FIXED_FILE)
@@ -1833,17 +1769,22 @@ static bool io_assign_file(struct io_kiocb *req, unsigned int issue_flags)
return !!req->file;
}
-static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
+#define REQ_ISSUE_SLOW_FLAGS (REQ_F_CREDS | REQ_F_ARM_LTIMEOUT)
+
+static inline int __io_issue_sqe(struct io_kiocb *req,
+ unsigned int issue_flags,
+ const struct io_issue_def *def)
{
- const struct io_op_def *def = &io_op_defs[req->opcode];
const struct cred *creds = NULL;
+ struct io_kiocb *link = NULL;
int ret;
- if (unlikely(!io_assign_file(req, issue_flags)))
- return -EBADF;
-
- if (unlikely((req->flags & REQ_F_CREDS) && req->creds != current_cred()))
- creds = override_creds(req->creds);
+ if (unlikely(req->flags & REQ_ISSUE_SLOW_FLAGS)) {
+ if ((req->flags & REQ_F_CREDS) && req->creds != current_cred())
+ creds = override_creds(req->creds);
+ if (req->flags & REQ_F_ARM_LTIMEOUT)
+ link = __io_prep_linked_timeout(req);
+ }
if (!def->audit_skip)
audit_uring_entry(req->opcode);
@@ -1853,71 +1794,130 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
if (!def->audit_skip)
audit_uring_exit(!ret, ret);
- if (creds)
- revert_creds(creds);
+ if (unlikely(creds || link)) {
+ if (creds)
+ revert_creds(creds);
+ if (link)
+ io_queue_linked_timeout(link);
+ }
+
+ return ret;
+}
- if (ret == IOU_OK) {
+static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
+{
+ const struct io_issue_def *def = &io_issue_defs[req->opcode];
+ int ret;
+
+ if (unlikely(!io_assign_file(req, def, issue_flags)))
+ return -EBADF;
+
+ ret = __io_issue_sqe(req, issue_flags, def);
+
+ if (ret == IOU_COMPLETE) {
if (issue_flags & IO_URING_F_COMPLETE_DEFER)
io_req_complete_defer(req);
else
io_req_complete_post(req, issue_flags);
- } else if (ret != IOU_ISSUE_SKIP_COMPLETE)
- return ret;
- /* If the op doesn't have a file, we're not polling for it */
- if ((req->ctx->flags & IORING_SETUP_IOPOLL) && def->iopoll_queue)
- io_iopoll_req_issued(req, issue_flags);
+ return 0;
+ }
- return 0;
+ if (ret == IOU_ISSUE_SKIP_COMPLETE) {
+ ret = 0;
+
+ /* If the op doesn't have a file, we're not polling for it */
+ if ((req->ctx->flags & IORING_SETUP_IOPOLL) && def->iopoll_queue)
+ io_iopoll_req_issued(req, issue_flags);
+ }
+ return ret;
}
-int io_poll_issue(struct io_kiocb *req, bool *locked)
+int io_poll_issue(struct io_kiocb *req, io_tw_token_t tw)
{
- io_tw_lock(req->ctx, locked);
- return io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_MULTISHOT|
- IO_URING_F_COMPLETE_DEFER);
+ const unsigned int issue_flags = IO_URING_F_NONBLOCK |
+ IO_URING_F_MULTISHOT |
+ IO_URING_F_COMPLETE_DEFER;
+ int ret;
+
+ io_tw_lock(req->ctx, tw);
+
+ WARN_ON_ONCE(!req->file);
+ if (WARN_ON_ONCE(req->ctx->flags & IORING_SETUP_IOPOLL))
+ return -EFAULT;
+
+ ret = __io_issue_sqe(req, issue_flags, &io_issue_defs[req->opcode]);
+
+ WARN_ON_ONCE(ret == IOU_ISSUE_SKIP_COMPLETE);
+ return ret;
}
struct io_wq_work *io_wq_free_work(struct io_wq_work *work)
{
struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+ struct io_kiocb *nxt = NULL;
- req = io_put_req_find_next(req);
- return req ? &req->work : NULL;
+ if (req_ref_put_and_test_atomic(req)) {
+ if (req->flags & IO_REQ_LINK_FLAGS)
+ nxt = io_req_find_next(req);
+ io_free_req(req);
+ }
+ return nxt ? &nxt->work : NULL;
}
void io_wq_submit_work(struct io_wq_work *work)
{
struct io_kiocb *req = container_of(work, struct io_kiocb, work);
- const struct io_op_def *def = &io_op_defs[req->opcode];
+ const struct io_issue_def *def = &io_issue_defs[req->opcode];
unsigned int issue_flags = IO_URING_F_UNLOCKED | IO_URING_F_IOWQ;
bool needs_poll = false;
int ret = 0, err = -ECANCELED;
- /* one will be dropped by ->io_wq_free_work() after returning to io-wq */
+ /* one will be dropped by io_wq_free_work() after returning to io-wq */
if (!(req->flags & REQ_F_REFCOUNT))
__io_req_set_refcount(req, 2);
else
req_ref_get(req);
- io_arm_ltimeout(req);
-
/* either cancelled or io-wq is dying, so don't touch tctx->iowq */
- if (work->flags & IO_WQ_WORK_CANCEL) {
+ if (atomic_read(&work->flags) & IO_WQ_WORK_CANCEL) {
fail:
io_req_task_queue_fail(req, err);
return;
}
- if (!io_assign_file(req, issue_flags)) {
+ if (!io_assign_file(req, def, issue_flags)) {
err = -EBADF;
- work->flags |= IO_WQ_WORK_CANCEL;
+ atomic_or(IO_WQ_WORK_CANCEL, &work->flags);
goto fail;
}
+ /*
+ * If DEFER_TASKRUN is set, it's only allowed to post CQEs from the
+ * submitter task context. Final request completions are handed to the
+ * right context, however this is not the case for auxiliary CQEs,
+ * which is the main means of operation for multishot requests.
+ * Don't allow any multishot execution from io-wq. It's more restrictive
+ * than necessary and also cleaner.
+ */
+ if (req->flags & (REQ_F_MULTISHOT|REQ_F_APOLL_MULTISHOT)) {
+ err = -EBADFD;
+ if (!io_file_can_poll(req))
+ goto fail;
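+ /*
+ * A nonblocking pollable file is handed back to the poll machinery;
+ * otherwise drop the multishot flags and issue the request as a
+ * regular single-shot from io-wq.
+ */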
+ if (req->file->f_flags & O_NONBLOCK ||
+ req->file->f_mode & FMODE_NOWAIT) {
+ err = -ECANCELED;
+ if (io_arm_poll_handler(req, issue_flags) != IO_APOLL_OK)
+ goto fail;
+ return;
+ } else {
+ req->flags &= ~(REQ_F_APOLL_MULTISHOT|REQ_F_MULTISHOT);
+ }
+ }
+
if (req->flags & REQ_F_FORCE_ASYNC) {
bool opcode_poll = def->pollin || def->pollout;
- if (opcode_poll && file_can_poll(req->file)) {
+ if (opcode_poll && io_file_can_poll(req)) {
needs_poll = true;
issue_flags |= IO_URING_F_NONBLOCK;
}
@@ -1927,6 +1927,14 @@ fail:
ret = io_issue_sqe(req, issue_flags);
if (ret != -EAGAIN)
break;
+
+ /*
+ * If REQ_F_NOWAIT is set, then don't wait or retry with
+ * poll. -EAGAIN is final for that case.
+ */
+ if (req->flags & REQ_F_NOWAIT)
+ break;
+
/*
* We can get EAGAIN for iopolled IO even though we're
* forcing a sync submission from here, since we can't
@@ -1935,6 +1943,8 @@ fail:
if (!needs_poll) {
if (!(req->ctx->flags & IORING_SETUP_IOPOLL))
break;
+ if (io_wq_worker_stopped())
+ break;
cond_resched();
continue;
}
@@ -1947,7 +1957,7 @@ fail:
} while (1);
/* avoid locking problems by failing it from a clean context */
- if (ret < 0)
+ if (ret)
io_req_task_queue_fail(req, ret);
}
@@ -1955,21 +1965,17 @@ inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
unsigned int issue_flags)
{
struct io_ring_ctx *ctx = req->ctx;
+ struct io_rsrc_node *node;
struct file *file = NULL;
- unsigned long file_ptr;
io_ring_submit_lock(ctx, issue_flags);
-
- if (unlikely((unsigned int)fd >= ctx->nr_user_files))
- goto out;
- fd = array_index_nospec(fd, ctx->nr_user_files);
- file_ptr = io_fixed_file_slot(&ctx->file_table, fd)->file_ptr;
- file = (struct file *) (file_ptr & FFS_MASK);
- file_ptr &= ~FFS_MASK;
- /* mask in overlapping REQ_F and FFS bits */
- req->flags |= (file_ptr << REQ_F_SUPPORT_NOWAIT_BIT);
- io_req_set_rsrc_node(req, ctx, 0);
-out:
+ node = io_rsrc_node_lookup(&ctx->file_table.data, fd);
+ if (node) {
+ node->refs++;
+ req->file_node = node;
+ req->flags |= io_slot_flags(node);
+ file = io_slot_file(node);
+ }
io_ring_submit_unlock(ctx, issue_flags);
return file;
}
@@ -1986,50 +1992,61 @@ struct file *io_file_get_normal(struct io_kiocb *req, int fd)
return file;
}
-static void io_queue_async(struct io_kiocb *req, int ret)
- __must_hold(&req->ctx->uring_lock)
+static int io_req_sqe_copy(struct io_kiocb *req, unsigned int issue_flags)
{
- struct io_kiocb *linked_timeout;
+ const struct io_cold_def *def = &io_cold_defs[req->opcode];
+ if (req->flags & REQ_F_SQE_COPIED)
+ return 0;
+ req->flags |= REQ_F_SQE_COPIED;
+ if (!def->sqe_copy)
+ return 0;
+ if (WARN_ON_ONCE(!(issue_flags & IO_URING_F_INLINE)))
+ return -EFAULT;
+ def->sqe_copy(req);
+ return 0;
+}
+
+static void io_queue_async(struct io_kiocb *req, unsigned int issue_flags, int ret)
+ __must_hold(&req->ctx->uring_lock)
+{
if (ret != -EAGAIN || (req->flags & REQ_F_NOWAIT)) {
+fail:
io_req_defer_failed(req, ret);
return;
}
- linked_timeout = io_prep_linked_timeout(req);
+ ret = io_req_sqe_copy(req, issue_flags);
+ if (unlikely(ret))
+ goto fail;
switch (io_arm_poll_handler(req, 0)) {
case IO_APOLL_READY:
- io_kbuf_recycle(req, 0);
io_req_task_queue(req);
break;
case IO_APOLL_ABORTED:
- io_kbuf_recycle(req, 0);
- io_queue_iowq(req, NULL);
+ io_queue_iowq(req);
break;
case IO_APOLL_OK:
break;
}
-
- if (linked_timeout)
- io_queue_linked_timeout(linked_timeout);
}
-static inline void io_queue_sqe(struct io_kiocb *req)
+static inline void io_queue_sqe(struct io_kiocb *req, unsigned int extra_flags)
__must_hold(&req->ctx->uring_lock)
{
+ unsigned int issue_flags = IO_URING_F_NONBLOCK |
+ IO_URING_F_COMPLETE_DEFER | extra_flags;
int ret;
- ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
+ ret = io_issue_sqe(req, issue_flags);
/*
* We async punt it if the file wasn't marked NOWAIT, or if the file
* doesn't support non-blocking read/write attempts
*/
- if (likely(!ret))
- io_arm_ltimeout(req);
- else
- io_queue_async(req, ret);
+ if (unlikely(ret))
+ io_queue_async(req, issue_flags, ret);
}
static void io_queue_sqe_fallback(struct io_kiocb *req)
@@ -2044,17 +2061,12 @@ static void io_queue_sqe_fallback(struct io_kiocb *req)
req->flags |= REQ_F_LINK;
io_req_defer_failed(req, req->cqe.res);
} else {
- int ret = io_req_prep_async(req);
-
- if (unlikely(ret)) {
- io_req_defer_failed(req, ret);
- return;
- }
-
+ /* can't fail with IO_URING_F_INLINE */
+ io_req_sqe_copy(req, IO_URING_F_INLINE);
if (unlikely(req->ctx->drain_active))
io_drain_req(req);
else
- io_queue_iowq(req, NULL);
+ io_queue_iowq(req);
}
}
@@ -2081,9 +2093,8 @@ static inline bool io_check_restriction(struct io_ring_ctx *ctx,
return true;
}
-static void io_init_req_drain(struct io_kiocb *req)
+static void io_init_drain(struct io_ring_ctx *ctx)
{
- struct io_ring_ctx *ctx = req->ctx;
struct io_kiocb *head = ctx->submit_state.link.head;
ctx->drain_active = true;
@@ -2100,49 +2111,78 @@ static void io_init_req_drain(struct io_kiocb *req)
}
}
+static __cold int io_init_fail_req(struct io_kiocb *req, int err)
+{
+ /* ensure per-opcode data is cleared if we fail before prep */
+ memset(&req->cmd.data, 0, sizeof(req->cmd.data));
+ return err;
+}
+
static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
- const struct io_uring_sqe *sqe)
+ const struct io_uring_sqe *sqe, unsigned int *left)
__must_hold(&ctx->uring_lock)
{
- const struct io_op_def *def;
+ const struct io_issue_def *def;
unsigned int sqe_flags;
int personality;
u8 opcode;
- /* req is partially pre-initialised, see io_preinit_req() */
+ req->ctx = ctx;
req->opcode = opcode = READ_ONCE(sqe->opcode);
/* same numerical values with corresponding REQ_F_*, safe to copy */
- req->flags = sqe_flags = READ_ONCE(sqe->flags);
+ sqe_flags = READ_ONCE(sqe->flags);
+ req->flags = (__force io_req_flags_t) sqe_flags;
req->cqe.user_data = READ_ONCE(sqe->user_data);
req->file = NULL;
- req->rsrc_node = NULL;
- req->task = current;
+ req->tctx = current->io_uring;
+ req->cancel_seq_set = false;
+ req->async_data = NULL;
if (unlikely(opcode >= IORING_OP_LAST)) {
req->opcode = 0;
- return -EINVAL;
+ return io_init_fail_req(req, -EINVAL);
}
- def = &io_op_defs[opcode];
+ opcode = array_index_nospec(opcode, IORING_OP_LAST);
+
+ def = &io_issue_defs[opcode];
+ if (def->is_128 && !(ctx->flags & IORING_SETUP_SQE128)) {
+ /*
+ * A 128b op on a non-128b SQ requires mixed SQE support as
+ * well as 2 contiguous entries.
+ */
+ if (!(ctx->flags & IORING_SETUP_SQE_MIXED) || *left < 2 ||
+ !(ctx->cached_sq_head & (ctx->sq_entries - 1)))
+ return io_init_fail_req(req, -EINVAL);
+ /*
+ * A 128b operation on a mixed SQ uses two entries, so we have
+ * to increment the head and cached refs, and decrement what's
+ * left.
+ */
+ current->io_uring->cached_refs++;
+ ctx->cached_sq_head++;
+ (*left)--;
+ }
+
if (unlikely(sqe_flags & ~SQE_COMMON_FLAGS)) {
/* enforce forwards compatibility on users */
if (sqe_flags & ~SQE_VALID_FLAGS)
- return -EINVAL;
+ return io_init_fail_req(req, -EINVAL);
if (sqe_flags & IOSQE_BUFFER_SELECT) {
if (!def->buffer_select)
- return -EOPNOTSUPP;
+ return io_init_fail_req(req, -EOPNOTSUPP);
req->buf_index = READ_ONCE(sqe->buf_group);
}
if (sqe_flags & IOSQE_CQE_SKIP_SUCCESS)
ctx->drain_disabled = true;
if (sqe_flags & IOSQE_IO_DRAIN) {
if (ctx->drain_disabled)
- return -EOPNOTSUPP;
- io_init_req_drain(req);
+ return io_init_fail_req(req, -EOPNOTSUPP);
+ io_init_drain(ctx);
}
}
if (unlikely(ctx->restricted || ctx->drain_active || ctx->drain_next)) {
if (ctx->restricted && !io_check_restriction(ctx, req, sqe_flags))
- return -EACCES;
+ return io_init_fail_req(req, -EACCES);
/* knock it to the slow queue path, will be drained there */
if (ctx->drain_active)
req->flags |= REQ_F_FORCE_ASYNC;
@@ -2155,9 +2195,9 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
}
if (!def->ioprio && sqe->ioprio)
- return -EINVAL;
+ return io_init_fail_req(req, -EINVAL);
if (!def->iopoll && (ctx->flags & IORING_SETUP_IOPOLL))
- return -EINVAL;
+ return io_init_fail_req(req, -EINVAL);
if (def->needs_file) {
struct io_submit_state *state = &ctx->submit_state;
@@ -2181,12 +2221,12 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
req->creds = xa_load(&ctx->personalities, personality);
if (!req->creds)
- return -EINVAL;
+ return io_init_fail_req(req, -EINVAL);
get_cred(req->creds);
ret = security_uring_override_creds(req->creds);
if (ret) {
put_cred(req->creds);
- return ret;
+ return io_init_fail_req(req, ret);
}
req->flags |= REQ_F_CREDS;
}
@@ -2232,18 +2272,17 @@ static __cold int io_submit_fail_init(const struct io_uring_sqe *sqe,
}
static inline int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
- const struct io_uring_sqe *sqe)
+ const struct io_uring_sqe *sqe, unsigned int *left)
__must_hold(&ctx->uring_lock)
{
struct io_submit_link *link = &ctx->submit_state.link;
int ret;
- ret = io_init_req(ctx, req, sqe);
+ ret = io_init_req(ctx, req, sqe, left);
if (unlikely(ret))
return io_submit_fail_init(sqe, req, ret);
- /* don't need @sqe from now on */
- trace_io_uring_submit_sqe(req, true);
+ trace_io_uring_submit_req(req);
/*
* If we already have a head request, queue this one for async
@@ -2253,11 +2292,8 @@ static inline int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
* conditions are true (normal request), then just queue it.
*/
if (unlikely(link->head)) {
- ret = io_req_prep_async(req);
- if (unlikely(ret))
- return io_submit_fail_init(sqe, req, ret);
-
- trace_io_uring_link(req, link->head);
+ trace_io_uring_link(req, link->last);
+ io_req_sqe_copy(req, IO_URING_F_INLINE);
link->last->link = req;
link->last = req;
@@ -2281,7 +2317,7 @@ fallback:
return 0;
}
- io_queue_sqe(req);
+ io_queue_sqe(req, IO_URING_F_INLINE);
return 0;
}
@@ -2333,10 +2369,21 @@ static void io_commit_sqring(struct io_ring_ctx *ctx)
* used, it's important that those reads are done through READ_ONCE() to
* prevent a re-load down the line.
*/
-static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
+static bool io_get_sqe(struct io_ring_ctx *ctx, const struct io_uring_sqe **sqe)
{
- unsigned head, mask = ctx->sq_entries - 1;
- unsigned sq_idx = ctx->cached_sq_head++ & mask;
+ unsigned mask = ctx->sq_entries - 1;
+ unsigned head = ctx->cached_sq_head++ & mask;
+
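+ /*
+ * The static key is only enabled while a ring that still uses an
+ * SQ array exists; NO_SQARRAY rings index the SQ entries directly.
+ */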
+ if (static_branch_unlikely(&io_key_has_sqarray) &&
+ (!(ctx->flags & IORING_SETUP_NO_SQARRAY))) {
+ head = READ_ONCE(ctx->sq_array[head]);
+ if (unlikely(head >= ctx->sq_entries)) {
+ WRITE_ONCE(ctx->rings->sq_dropped,
+ READ_ONCE(ctx->rings->sq_dropped) + 1);
+ return false;
+ }
+ head = array_index_nospec(head, ctx->sq_entries);
+ }
/*
* The cached sq head (or cq tail) serves two purposes:
@@ -2346,19 +2393,12 @@ static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
* 2) allows the kernel side to track the head on its own, even
* though the application is the one updating it.
*/
- head = READ_ONCE(ctx->sq_array[sq_idx]);
- if (likely(head < ctx->sq_entries)) {
- /* double index for 128-byte SQEs, twice as long */
- if (ctx->flags & IORING_SETUP_SQE128)
- head <<= 1;
- return &ctx->sq_sqes[head];
- }
- /* drop invalid entries */
- ctx->cq_extra--;
- WRITE_ONCE(ctx->rings->sq_dropped,
- READ_ONCE(ctx->rings->sq_dropped) + 1);
- return NULL;
+ /* double index for 128-byte SQEs, twice as long */
+ if (ctx->flags & IORING_SETUP_SQE128)
+ head <<= 1;
+ *sqe = &ctx->sq_sqes[head];
+ return true;
}
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
@@ -2368,10 +2408,11 @@ int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
unsigned int left;
int ret;
+ entries = min(nr, entries);
if (unlikely(!entries))
return 0;
- /* make sure SQ entry isn't read before tail */
- ret = left = min3(nr, ctx->sq_entries, entries);
+
+ ret = left = entries;
io_get_task_refs(left);
io_submit_state_start(&ctx->submit_state, left);
@@ -2379,11 +2420,9 @@ int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
const struct io_uring_sqe *sqe;
struct io_kiocb *req;
- if (unlikely(!io_alloc_req_refill(ctx)))
+ if (unlikely(!io_alloc_req(ctx, &req)))
break;
- req = io_alloc_req(ctx);
- sqe = io_get_sqe(ctx);
- if (unlikely(!sqe)) {
+ if (unlikely(!io_get_sqe(ctx, &sqe))) {
io_req_add_to_cache(req, ctx);
break;
}
@@ -2392,7 +2431,7 @@ int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
* Continue submitting even for sqe failure if the
* ring was setup with IORING_SETUP_SUBMIT_ALL
*/
- if (unlikely(io_submit_sqe(ctx, req, sqe)) &&
+ if (unlikely(io_submit_sqe(ctx, req, sqe, &left)) &&
!(ctx->flags & IORING_SETUP_SUBMIT_ALL)) {
left--;
break;
@@ -2413,366 +2452,485 @@ int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
return ret;
}
-struct io_wait_queue {
- struct wait_queue_entry wq;
- struct io_ring_ctx *ctx;
- unsigned cq_tail;
- unsigned nr_timeouts;
-};
-
-static inline bool io_has_work(struct io_ring_ctx *ctx)
-{
- return test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq) ||
- ((ctx->flags & IORING_SETUP_DEFER_TASKRUN) &&
- !llist_empty(&ctx->work_llist));
-}
-
-static inline bool io_should_wake(struct io_wait_queue *iowq)
-{
- struct io_ring_ctx *ctx = iowq->ctx;
- int dist = READ_ONCE(ctx->rings->cq.tail) - (int) iowq->cq_tail;
-
- /*
- * Wake up if we have enough events, or if a timeout occurred since we
- * started waiting. For timeouts, we always want to return to userspace,
- * regardless of event count.
- */
- return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
-}
-
static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
int wake_flags, void *key)
{
- struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
- wq);
- struct io_ring_ctx *ctx = iowq->ctx;
+ struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue, wq);
/*
* Cannot safely flush overflowed CQEs from here, ensure we wake up
* the task, and the next invocation will do it.
*/
- if (io_should_wake(iowq) || io_has_work(ctx))
+ if (io_should_wake(iowq) || io_has_work(iowq->ctx))
return autoremove_wake_function(curr, mode, wake_flags, key);
return -1;
}
int io_run_task_work_sig(struct io_ring_ctx *ctx)
{
- if (io_run_task_work_ctx(ctx) > 0)
- return 1;
+ if (io_local_work_pending(ctx)) {
+ __set_current_state(TASK_RUNNING);
+ if (io_run_local_work(ctx, INT_MAX, IO_LOCAL_TW_DEFAULT_MAX) > 0)
+ return 0;
+ }
+ if (io_run_task_work() > 0)
+ return 0;
if (task_sigpending(current))
return -EINTR;
return 0;
}
-/* when returns >0, the caller should retry */
-static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
- struct io_wait_queue *iowq,
- ktime_t *timeout)
+static bool current_pending_io(void)
{
- int ret;
- unsigned long check_cq;
+ struct io_uring_task *tctx = current->io_uring;
- /* make sure we run task_work before checking for signals */
- ret = io_run_task_work_sig(ctx);
- if (ret || io_should_wake(iowq))
- return ret;
+ if (!tctx)
+ return false;
+ return percpu_counter_read_positive(&tctx->inflight);
+}
- check_cq = READ_ONCE(ctx->check_cq);
- if (unlikely(check_cq)) {
- /* let the caller flush overflows, retry */
- if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
- return 1;
- if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT))
- return -EBADR;
+static enum hrtimer_restart io_cqring_timer_wakeup(struct hrtimer *timer)
+{
+ struct io_wait_queue *iowq = container_of(timer, struct io_wait_queue, t);
+
+ WRITE_ONCE(iowq->hit_timeout, 1);
+ iowq->min_timeout = 0;
+ wake_up_process(iowq->wq.private);
+ return HRTIMER_NORESTART;
+}
+
+/*
+ * Doing min_timeout portion. If we saw any timeouts, events, or have work,
+ * wake up. If not, and we have a normal timeout, switch to that and keep
+ * sleeping.
+ */
+static enum hrtimer_restart io_cqring_min_timer_wakeup(struct hrtimer *timer)
+{
+ struct io_wait_queue *iowq = container_of(timer, struct io_wait_queue, t);
+ struct io_ring_ctx *ctx = iowq->ctx;
+
+ /* no general timeout, or shorter (or equal), we are done */
+ if (iowq->timeout == KTIME_MAX ||
+ ktime_compare(iowq->min_timeout, iowq->timeout) >= 0)
+ goto out_wake;
+ /* work we may need to run, wake function will see if we need to wake */
+ if (io_has_work(ctx))
+ goto out_wake;
+ /* got events since we started waiting, min timeout is done */
+ if (iowq->cq_min_tail != READ_ONCE(ctx->rings->cq.tail))
+ goto out_wake;
+ /* if we have any events and min timeout expired, we're done */
+ if (io_cqring_events(ctx))
+ goto out_wake;
+
+ /*
+ * If using deferred task_work running and application is waiting on
+ * more than one request, ensure we reset it now that we are switching
+ * to normal sleeps. Any request completion post min_wait should wake
+ * the task and return.
+ */
+ if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
+ atomic_set(&ctx->cq_wait_nr, 1);
+ smp_mb();
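+ /* pairs with the barrier implied by the cmpxchg in io_req_local_work_add() */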
+ if (!llist_empty(&ctx->work_llist))
+ goto out_wake;
}
- if (!schedule_hrtimeout(timeout, HRTIMER_MODE_ABS))
- return -ETIME;
+
+ hrtimer_update_function(&iowq->t, io_cqring_timer_wakeup);
+ hrtimer_set_expires(timer, iowq->timeout);
+ return HRTIMER_RESTART;
+out_wake:
+ return io_cqring_timer_wakeup(timer);
+}
+
+static int io_cqring_schedule_timeout(struct io_wait_queue *iowq,
+ clockid_t clock_id, ktime_t start_time)
+{
+ ktime_t timeout;
+
+ if (iowq->min_timeout) {
+ timeout = ktime_add_ns(iowq->min_timeout, start_time);
+ hrtimer_setup_on_stack(&iowq->t, io_cqring_min_timer_wakeup, clock_id,
+ HRTIMER_MODE_ABS);
+ } else {
+ timeout = iowq->timeout;
+ hrtimer_setup_on_stack(&iowq->t, io_cqring_timer_wakeup, clock_id,
+ HRTIMER_MODE_ABS);
+ }
+
+ hrtimer_set_expires_range_ns(&iowq->t, timeout, 0);
+ hrtimer_start_expires(&iowq->t, HRTIMER_MODE_ABS);
+
+ if (!READ_ONCE(iowq->hit_timeout))
+ schedule();
+
+ hrtimer_cancel(&iowq->t);
+ destroy_hrtimer_on_stack(&iowq->t);
+ __set_current_state(TASK_RUNNING);
+
+ return READ_ONCE(iowq->hit_timeout) ? -ETIME : 0;
+}
+
+struct ext_arg {
+ size_t argsz;
+ struct timespec64 ts;
+ const sigset_t __user *sig;
+ ktime_t min_time;
+ bool ts_set;
+ bool iowait;
+};
+
+static int __io_cqring_wait_schedule(struct io_ring_ctx *ctx,
+ struct io_wait_queue *iowq,
+ struct ext_arg *ext_arg,
+ ktime_t start_time)
+{
+ int ret = 0;
/*
- * Run task_work after scheduling. If we got woken because of
- * task_work being processed, run it now rather than let the caller
- * do another wait loop.
+ * Mark us as being in io_wait if we have pending requests, so cpufreq
+ * can take into account that the task is waiting for IO - turns out
+ * to be important for low QD IO.
*/
- ret = io_run_task_work_sig(ctx);
- return ret < 0 ? ret : 1;
+ if (ext_arg->iowait && current_pending_io())
+ current->in_iowait = 1;
+ if (iowq->timeout != KTIME_MAX || iowq->min_timeout)
+ ret = io_cqring_schedule_timeout(iowq, ctx->clockid, start_time);
+ else
+ schedule();
+ current->in_iowait = 0;
+ return ret;
+}
+
+/* If this returns > 0, the caller should retry */
+static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
+ struct io_wait_queue *iowq,
+ struct ext_arg *ext_arg,
+ ktime_t start_time)
+{
+ if (unlikely(READ_ONCE(ctx->check_cq)))
+ return 1;
+ if (unlikely(io_local_work_pending(ctx)))
+ return 1;
+ if (unlikely(task_work_pending(current)))
+ return 1;
+ if (unlikely(task_sigpending(current)))
+ return -EINTR;
+ if (unlikely(io_should_wake(iowq)))
+ return 0;
+
+ return __io_cqring_wait_schedule(ctx, iowq, ext_arg, start_time);
}
/*
* Wait until events become available, if we don't already have some. The
* application must reap them itself, as they reside on the shared cq ring.
*/
-static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
- const sigset_t __user *sig, size_t sigsz,
- struct __kernel_timespec __user *uts)
+static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, u32 flags,
+ struct ext_arg *ext_arg)
{
struct io_wait_queue iowq;
struct io_rings *rings = ctx->rings;
- ktime_t timeout = KTIME_MAX;
+ ktime_t start_time;
int ret;
+ min_events = min_t(int, min_events, ctx->cq_entries);
+
if (!io_allowed_run_tw(ctx))
return -EEXIST;
+ if (io_local_work_pending(ctx))
+ io_run_local_work(ctx, min_events,
+ max(IO_LOCAL_TW_DEFAULT_MAX, min_events));
+ io_run_task_work();
- do {
- /* always run at least 1 task work to process local work */
- ret = io_run_task_work_ctx(ctx);
- if (ret < 0)
- return ret;
- io_cqring_overflow_flush(ctx);
+ if (unlikely(test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)))
+ io_cqring_do_overflow_flush(ctx);
+ if (__io_cqring_events_user(ctx) >= min_events)
+ return 0;
- /* if user messes with these they will just get an early return */
- if (__io_cqring_events_user(ctx) >= min_events)
- return 0;
- } while (ret > 0);
+ init_waitqueue_func_entry(&iowq.wq, io_wake_function);
+ iowq.wq.private = current;
+ INIT_LIST_HEAD(&iowq.wq.entry);
+ iowq.ctx = ctx;
+ iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;
+ iowq.cq_min_tail = READ_ONCE(ctx->rings->cq.tail);
+ iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
+ iowq.hit_timeout = 0;
+ iowq.min_timeout = ext_arg->min_time;
+ iowq.timeout = KTIME_MAX;
+ start_time = io_get_time(ctx);
- if (sig) {
+ if (ext_arg->ts_set) {
+ iowq.timeout = timespec64_to_ktime(ext_arg->ts);
+ if (!(flags & IORING_ENTER_ABS_TIMER))
+ iowq.timeout = ktime_add(iowq.timeout, start_time);
+ }
+
+ if (ext_arg->sig) {
#ifdef CONFIG_COMPAT
if (in_compat_syscall())
- ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
- sigsz);
+ ret = set_compat_user_sigmask((const compat_sigset_t __user *)ext_arg->sig,
+ ext_arg->argsz);
else
#endif
- ret = set_user_sigmask(sig, sigsz);
+ ret = set_user_sigmask(ext_arg->sig, ext_arg->argsz);
if (ret)
return ret;
}
- if (uts) {
- struct timespec64 ts;
-
- if (get_timespec64(&ts, uts))
- return -EFAULT;
- timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
- }
-
- init_waitqueue_func_entry(&iowq.wq, io_wake_function);
- iowq.wq.private = current;
- INIT_LIST_HEAD(&iowq.wq.entry);
- iowq.ctx = ctx;
- iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
- iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;
+ io_napi_busy_loop(ctx, &iowq);
trace_io_uring_cqring_wait(ctx, min_events);
do {
- if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)) {
- finish_wait(&ctx->cq_wait, &iowq.wq);
- io_cqring_do_overflow_flush(ctx);
+ unsigned long check_cq;
+ int nr_wait;
+
+ /* if min timeout has been hit, don't reset wait count */
+ if (!iowq.hit_timeout)
+ nr_wait = (int) iowq.cq_tail -
+ READ_ONCE(ctx->rings->cq.tail);
+ else
+ nr_wait = 1;
+
+ if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
+ atomic_set(&ctx->cq_wait_nr, nr_wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ } else {
+ prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
+ TASK_INTERRUPTIBLE);
}
- prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
- TASK_INTERRUPTIBLE);
- ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
- if (__io_cqring_events_user(ctx) >= min_events)
+
+ ret = io_cqring_wait_schedule(ctx, &iowq, ext_arg, start_time);
+ __set_current_state(TASK_RUNNING);
+ atomic_set(&ctx->cq_wait_nr, IO_CQ_WAKE_INIT);
+
+ /*
+ * Run task_work after scheduling and before io_should_wake().
+ * If we got woken because of task_work being processed, run it
+ * now rather than let the caller do another wait loop.
+ */
+ if (io_local_work_pending(ctx))
+ io_run_local_work(ctx, nr_wait, nr_wait);
+ io_run_task_work();
+
+ /*
+ * Non-local task_work will be run on exit to userspace, but
+ * if we're using DEFER_TASKRUN, then we could have waited
+ * with a timeout for a number of requests. If the timeout
+ * hits, we could have some requests ready to process. Ensure
+ * this break is _after_ we have run task_work, to avoid
+ * deferring running potentially pending requests until the
+ * next time we wait for events.
+ */
+ if (ret < 0)
break;
+
+ check_cq = READ_ONCE(ctx->check_cq);
+ if (unlikely(check_cq)) {
+ /* let the caller flush overflows, retry */
+ if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
+ io_cqring_do_overflow_flush(ctx);
+ if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT)) {
+ ret = -EBADR;
+ break;
+ }
+ }
+
+ if (io_should_wake(&iowq)) {
+ ret = 0;
+ break;
+ }
cond_resched();
- } while (ret > 0);
+ } while (1);
- finish_wait(&ctx->cq_wait, &iowq.wq);
+ if (!(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
+ finish_wait(&ctx->cq_wait, &iowq.wq);
restore_saved_sigmask_unless(ret == -EINTR);
return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
}
-static void io_mem_free(void *ptr)
+static void io_rings_free(struct io_ring_ctx *ctx)
{
- struct page *page;
-
- if (!ptr)
- return;
-
- page = virt_to_head_page(ptr);
- if (put_page_testzero(page))
- free_compound_page(page);
+ io_free_region(ctx->user, &ctx->sq_region);
+ io_free_region(ctx->user, &ctx->ring_region);
+ ctx->rings = NULL;
+ ctx->sq_sqes = NULL;
}
-static void *io_mem_alloc(size_t size)
+static int rings_size(unsigned int flags, unsigned int sq_entries,
+ unsigned int cq_entries, struct io_rings_layout *rl)
{
- gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP;
+ struct io_rings *rings;
+ size_t sqe_size;
+ size_t off;
- return (void *) __get_free_pages(gfp, get_order(size));
-}
+ if (flags & IORING_SETUP_CQE_MIXED) {
+ if (cq_entries < 2)
+ return -EOVERFLOW;
+ }
+ if (flags & IORING_SETUP_SQE_MIXED) {
+ if (sq_entries < 2)
+ return -EOVERFLOW;
+ }
-static unsigned long rings_size(struct io_ring_ctx *ctx, unsigned int sq_entries,
- unsigned int cq_entries, size_t *sq_offset)
-{
- struct io_rings *rings;
- size_t off, sq_array_size;
+ rl->sq_array_offset = SIZE_MAX;
+
+ sqe_size = sizeof(struct io_uring_sqe);
+ if (flags & IORING_SETUP_SQE128)
+ sqe_size *= 2;
+
+ rl->sq_size = array_size(sqe_size, sq_entries);
+ if (rl->sq_size == SIZE_MAX)
+ return -EOVERFLOW;
off = struct_size(rings, cqes, cq_entries);
+ if (flags & IORING_SETUP_CQE32)
+ off = size_mul(off, 2);
if (off == SIZE_MAX)
- return SIZE_MAX;
- if (ctx->flags & IORING_SETUP_CQE32) {
- if (check_shl_overflow(off, 1, &off))
- return SIZE_MAX;
- }
+ return -EOVERFLOW;
#ifdef CONFIG_SMP
off = ALIGN(off, SMP_CACHE_BYTES);
if (off == 0)
- return SIZE_MAX;
+ return -EOVERFLOW;
#endif
- if (sq_offset)
- *sq_offset = off;
-
- sq_array_size = array_size(sizeof(u32), sq_entries);
- if (sq_array_size == SIZE_MAX)
- return SIZE_MAX;
-
- if (check_add_overflow(off, sq_array_size, &off))
- return SIZE_MAX;
-
- return off;
-}
-
-static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg,
- unsigned int eventfd_async)
-{
- struct io_ev_fd *ev_fd;
- __s32 __user *fds = arg;
- int fd;
+ if (!(flags & IORING_SETUP_NO_SQARRAY)) {
+ size_t sq_array_size;
- ev_fd = rcu_dereference_protected(ctx->io_ev_fd,
- lockdep_is_held(&ctx->uring_lock));
- if (ev_fd)
- return -EBUSY;
+ rl->sq_array_offset = off;
- if (copy_from_user(&fd, fds, sizeof(*fds)))
- return -EFAULT;
-
- ev_fd = kmalloc(sizeof(*ev_fd), GFP_KERNEL);
- if (!ev_fd)
- return -ENOMEM;
-
- ev_fd->cq_ev_fd = eventfd_ctx_fdget(fd);
- if (IS_ERR(ev_fd->cq_ev_fd)) {
- int ret = PTR_ERR(ev_fd->cq_ev_fd);
- kfree(ev_fd);
- return ret;
+ sq_array_size = array_size(sizeof(u32), sq_entries);
+ off = size_add(off, sq_array_size);
+ if (off == SIZE_MAX)
+ return -EOVERFLOW;
}
- spin_lock(&ctx->completion_lock);
- ctx->evfd_last_cq_tail = ctx->cached_cq_tail;
- spin_unlock(&ctx->completion_lock);
-
- ev_fd->eventfd_async = eventfd_async;
- ctx->has_evfd = true;
- rcu_assign_pointer(ctx->io_ev_fd, ev_fd);
- atomic_set(&ev_fd->refs, 1);
- atomic_set(&ev_fd->ops, 0);
+ rl->rings_size = off;
return 0;
}
-static int io_eventfd_unregister(struct io_ring_ctx *ctx)
-{
- struct io_ev_fd *ev_fd;
-
- ev_fd = rcu_dereference_protected(ctx->io_ev_fd,
- lockdep_is_held(&ctx->uring_lock));
- if (ev_fd) {
- ctx->has_evfd = false;
- rcu_assign_pointer(ctx->io_ev_fd, NULL);
- if (!atomic_fetch_or(BIT(IO_EVENTFD_OP_FREE_BIT), &ev_fd->ops))
- call_rcu(&ev_fd->rcu, io_eventfd_ops);
- return 0;
- }
-
- return -ENXIO;
-}
-
-static void io_req_caches_free(struct io_ring_ctx *ctx)
+static __cold void __io_req_caches_free(struct io_ring_ctx *ctx)
{
+ struct io_kiocb *req;
int nr = 0;
- mutex_lock(&ctx->uring_lock);
- io_flush_cached_locked_reqs(ctx, &ctx->submit_state);
-
while (!io_req_cache_empty(ctx)) {
- struct io_kiocb *req = io_alloc_req(ctx);
-
+ req = io_extract_req(ctx);
+ io_poison_req(req);
kmem_cache_free(req_cachep, req);
nr++;
}
- if (nr)
+ if (nr) {
+ ctx->nr_req_allocated -= nr;
percpu_ref_put_many(&ctx->refs, nr);
- mutex_unlock(&ctx->uring_lock);
+ }
+}
+
+static __cold void io_req_caches_free(struct io_ring_ctx *ctx)
+{
+ guard(mutex)(&ctx->uring_lock);
+ __io_req_caches_free(ctx);
}
static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
{
io_sq_thread_finish(ctx);
- io_rsrc_refs_drop(ctx);
- /* __io_rsrc_put_work() may need uring_lock to progress, wait w/o it */
- io_wait_rsrc_data(ctx->buf_data);
- io_wait_rsrc_data(ctx->file_data);
mutex_lock(&ctx->uring_lock);
- if (ctx->buf_data)
- __io_sqe_buffers_unregister(ctx);
- if (ctx->file_data)
- __io_sqe_files_unregister(ctx);
+ io_sqe_buffers_unregister(ctx);
+ io_sqe_files_unregister(ctx);
+ io_unregister_zcrx_ifqs(ctx);
io_cqring_overflow_kill(ctx);
io_eventfd_unregister(ctx);
- io_alloc_cache_free(&ctx->apoll_cache, io_apoll_cache_free);
- io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free);
- mutex_unlock(&ctx->uring_lock);
+ io_free_alloc_caches(ctx);
io_destroy_buffers(ctx);
+ io_free_region(ctx->user, &ctx->param_region);
+ mutex_unlock(&ctx->uring_lock);
if (ctx->sq_creds)
put_cred(ctx->sq_creds);
if (ctx->submitter_task)
put_task_struct(ctx->submitter_task);
- /* there are no registered resources left, nobody uses it */
- if (ctx->rsrc_node)
- io_rsrc_node_destroy(ctx->rsrc_node);
- if (ctx->rsrc_backup_node)
- io_rsrc_node_destroy(ctx->rsrc_backup_node);
- flush_delayed_work(&ctx->rsrc_put_work);
- flush_delayed_work(&ctx->fallback_work);
-
- WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list));
- WARN_ON_ONCE(!llist_empty(&ctx->rsrc_put_llist));
-
-#if defined(CONFIG_UNIX)
- if (ctx->ring_sock) {
- ctx->ring_sock->file = NULL; /* so that iput() is called */
- sock_release(ctx->ring_sock);
- }
-#endif
WARN_ON_ONCE(!list_empty(&ctx->ltimeout_list));
if (ctx->mm_account) {
mmdrop(ctx->mm_account);
ctx->mm_account = NULL;
}
- io_mem_free(ctx->rings);
- io_mem_free(ctx->sq_sqes);
+ io_rings_free(ctx);
+
+ if (!(ctx->flags & IORING_SETUP_NO_SQARRAY))
+ static_branch_dec(&io_key_has_sqarray);
percpu_ref_exit(&ctx->refs);
free_uid(ctx->user);
io_req_caches_free(ctx);
+
+ WARN_ON_ONCE(ctx->nr_req_allocated);
+
if (ctx->hash_map)
io_wq_put_hash(ctx->hash_map);
- kfree(ctx->cancel_table.hbs);
- kfree(ctx->cancel_table_locked.hbs);
- kfree(ctx->dummy_ubuf);
- kfree(ctx->io_bl);
+ io_napi_free(ctx);
+ kvfree(ctx->cancel_table.hbs);
xa_destroy(&ctx->io_bl_xa);
kfree(ctx);
}
+static __cold void io_activate_pollwq_cb(struct callback_head *cb)
+{
+ struct io_ring_ctx *ctx = container_of(cb, struct io_ring_ctx,
+ poll_wq_task_work);
+
+ mutex_lock(&ctx->uring_lock);
+ ctx->poll_activated = true;
+ mutex_unlock(&ctx->uring_lock);
+
+ /*
+ * Wake ups for some events between start of polling and activation
+ * might've been lost due to loose synchronisation.
+ */
+ wake_up_all(&ctx->poll_wq);
+ percpu_ref_put(&ctx->refs);
+}
+
+__cold void io_activate_pollwq(struct io_ring_ctx *ctx)
+{
+ spin_lock(&ctx->completion_lock);
+ /* already activated or in progress */
+ if (ctx->poll_activated || ctx->poll_wq_task_work.func)
+ goto out;
+ if (WARN_ON_ONCE(!ctx->task_complete))
+ goto out;
+ if (!ctx->submitter_task)
+ goto out;
+ /*
+ * with ->submitter_task only the submitter task completes requests, we
+ * only need to sync with it, which is done by injecting a tw
+ */
+ init_task_work(&ctx->poll_wq_task_work, io_activate_pollwq_cb);
+ percpu_ref_get(&ctx->refs);
+ if (task_work_add(ctx->submitter_task, &ctx->poll_wq_task_work, TWA_SIGNAL))
+ percpu_ref_put(&ctx->refs);
+out:
+ spin_unlock(&ctx->completion_lock);
+}
+
static __poll_t io_uring_poll(struct file *file, poll_table *wait)
{
struct io_ring_ctx *ctx = file->private_data;
__poll_t mask = 0;
- poll_wait(file, &ctx->cq_wait, wait);
+ if (unlikely(!ctx->poll_activated))
+ io_activate_pollwq(ctx);
/*
- * synchronizes with barrier from wq_has_sleeper call in
- * io_commit_cqring
+ * provides mb() which pairs with barrier from wq_has_sleeper
+ * call in io_commit_cqring
*/
- smp_rmb();
+ poll_wait(file, &ctx->poll_wq, wait);
+
if (!io_sqring_full(ctx))
mask |= EPOLLOUT | EPOLLWRNORM;
@@ -2790,25 +2948,12 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait)
* pushes them to do the flush.
*/
- if (io_cqring_events(ctx) || io_has_work(ctx))
+ if (__io_cqring_events_user(ctx) || io_has_work(ctx))
mask |= EPOLLIN | EPOLLRDNORM;
return mask;
}
-static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
-{
- const struct cred *creds;
-
- creds = xa_erase(&ctx->personalities, id);
- if (creds) {
- put_cred(creds);
- return 0;
- }
-
- return -EINVAL;
-}
-
struct io_tctx_exit {
struct callback_head task_work;
struct completion completion;
@@ -2822,23 +2967,16 @@ static __cold void io_tctx_exit_cb(struct callback_head *cb)
work = container_of(cb, struct io_tctx_exit, task_work);
/*
- * When @in_idle, we're in cancellation and it's racy to remove the
+ * When @in_cancel, we're in cancellation and it's racy to remove the
* node. It'll be removed by the end of cancellation, just ignore it.
* tctx can be NULL if the queueing of this task_work raced with
* work cancelation off the exec path.
*/
- if (tctx && !atomic_read(&tctx->in_idle))
+ if (tctx && !atomic_read(&tctx->in_cancel))
io_uring_del_tctx_node((unsigned long)work->ctx);
complete(&work->completion);
}
-static __cold bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
-{
- struct io_kiocb *req = container_of(work, struct io_kiocb, work);
-
- return req->ctx == data;
-}
-
static __cold void io_ring_exit_work(struct work_struct *work)
{
struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work);
@@ -2864,7 +3002,8 @@ static __cold void io_ring_exit_work(struct work_struct *work)
if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
io_move_task_work_from_local(ctx);
- while (io_uring_try_cancel_requests(ctx, NULL, true))
+ /* The SQPOLL thread never reaches this path */
+ while (io_uring_try_cancel_requests(ctx, NULL, true, false))
cond_resched();
if (ctx->sq_data) {
@@ -2872,7 +3011,7 @@ static __cold void io_ring_exit_work(struct work_struct *work)
struct task_struct *tsk;
io_sq_thread_park(sqd);
- tsk = sqd->thread;
+ tsk = sqpoll_task_locked(sqd);
if (tsk && tsk->io_uring && tsk->io_uring->io_wq)
io_wq_cancel_cb(tsk->io_uring->io_wq,
io_cancel_ctx_cb, ctx, true);
@@ -2885,17 +3024,23 @@ static __cold void io_ring_exit_work(struct work_struct *work)
/* there is little hope left, don't run it too often */
interval = HZ * 60;
}
- } while (!wait_for_completion_timeout(&ctx->ref_comp, interval));
+ /*
+	 * This is conceptually an uninterruptible wait, as it has to run to
+	 * completion. But it's also run from a kworker, which doesn't take
+	 * signals, so it's fine to make it interruptible. That avoids
+	 * scenarios where we knowingly could wait much longer on completions,
+	 * for example if someone does a SIGSTOP on a task that needs to
+	 * finish task_work to make this loop complete. That's a synthetic
+	 * situation that should not cause a stuck-task backtrace, and hence
+	 * a potential panic on stuck tasks if that is enabled.
+ */
+ } while (!wait_for_completion_interruptible_timeout(&ctx->ref_comp, interval));
init_completion(&exit.completion);
init_task_work(&exit.task_work, io_tctx_exit_cb);
exit.ctx = ctx;
- /*
- * Some may use context even when all refs and requests have been put,
- * and they are free to do so while still holding uring_lock or
- * completion_lock, see io_req_task_submit(). Apart from other work,
- * this lock/unlock section also waits them to finish.
- */
+
mutex_lock(&ctx->uring_lock);
while (!list_empty(&ctx->tctx_list)) {
WARN_ON_ONCE(time_after(jiffies, timeout));
@@ -2909,13 +3054,22 @@ static __cold void io_ring_exit_work(struct work_struct *work)
continue;
mutex_unlock(&ctx->uring_lock);
- wait_for_completion(&exit.completion);
+ /*
+ * See comment above for
+ * wait_for_completion_interruptible_timeout() on why this
+ * wait is marked as interruptible.
+ */
+ wait_for_completion_interruptible(&exit.completion);
mutex_lock(&ctx->uring_lock);
}
mutex_unlock(&ctx->uring_lock);
spin_lock(&ctx->completion_lock);
spin_unlock(&ctx->completion_lock);
+ /* pairs with RCU read section in io_req_local_work_add() */
+ if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
+ synchronize_rcu();
+
io_ring_ctx_free(ctx);
}
@@ -2928,25 +3082,18 @@ static __cold void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
percpu_ref_kill(&ctx->refs);
xa_for_each(&ctx->personalities, index, creds)
io_unregister_personality(ctx, index);
- if (ctx->rings)
- io_poll_remove_all(ctx, NULL, true);
mutex_unlock(&ctx->uring_lock);
- /*
- * If we failed setting up the ctx, we might not have any rings
- * and therefore did not submit any requests
- */
- if (ctx->rings)
- io_kill_timeouts(ctx, NULL, true);
+ flush_delayed_work(&ctx->fallback_work);
INIT_WORK(&ctx->exit_work, io_ring_exit_work);
/*
- * Use system_unbound_wq to avoid spawning tons of event kworkers
+ * Use system_dfl_wq to avoid spawning tons of event kworkers
* if we're exiting a ton of rings at the same time. It just adds
 	 * noise and overhead; there's no discernible change in runtime
- * over using system_wq.
+ * over using system_percpu_wq.
*/
- queue_work(system_unbound_wq, &ctx->exit_work);
+ queue_work(iou_wq, &ctx->exit_work);
}
static int io_uring_release(struct inode *inode, struct file *file)
@@ -2958,301 +3105,77 @@ static int io_uring_release(struct inode *inode, struct file *file)
return 0;
}
-struct io_task_cancel {
- struct task_struct *task;
- bool all;
-};
-
-static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
-{
- struct io_kiocb *req = container_of(work, struct io_kiocb, work);
- struct io_task_cancel *cancel = data;
-
- return io_match_task_safe(req, cancel->task, cancel->all);
-}
-
-static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx,
- struct task_struct *task,
- bool cancel_all)
-{
- struct io_defer_entry *de;
- LIST_HEAD(list);
-
- spin_lock(&ctx->completion_lock);
- list_for_each_entry_reverse(de, &ctx->defer_list, list) {
- if (io_match_task_safe(de->req, task, cancel_all)) {
- list_cut_position(&list, &ctx->defer_list, &de->list);
- break;
- }
- }
- spin_unlock(&ctx->completion_lock);
- if (list_empty(&list))
- return false;
-
- while (!list_empty(&list)) {
- de = list_first_entry(&list, struct io_defer_entry, list);
- list_del_init(&de->list);
- io_req_task_queue_fail(de->req, -ECANCELED);
- kfree(de);
- }
- return true;
-}
-
-static __cold bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
-{
- struct io_tctx_node *node;
- enum io_wq_cancel cret;
- bool ret = false;
-
- mutex_lock(&ctx->uring_lock);
- list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
- struct io_uring_task *tctx = node->task->io_uring;
-
- /*
- * io_wq will stay alive while we hold uring_lock, because it's
- * killed after ctx nodes, which requires to take the lock.
- */
- if (!tctx || !tctx->io_wq)
- continue;
- cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb, ctx, true);
- ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
- }
- mutex_unlock(&ctx->uring_lock);
-
- return ret;
-}
-
-static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
- struct task_struct *task,
- bool cancel_all)
+static struct io_uring_reg_wait *io_get_ext_arg_reg(struct io_ring_ctx *ctx,
+ const struct io_uring_getevents_arg __user *uarg)
{
- struct io_task_cancel cancel = { .task = task, .all = cancel_all, };
- struct io_uring_task *tctx = task ? task->io_uring : NULL;
- enum io_wq_cancel cret;
- bool ret = false;
+ unsigned long size = sizeof(struct io_uring_reg_wait);
+ unsigned long offset = (uintptr_t)uarg;
+ unsigned long end;
- /* failed during ring init, it couldn't have issued any requests */
- if (!ctx->rings)
- return false;
+ if (unlikely(offset % sizeof(long)))
+ return ERR_PTR(-EFAULT);
- if (!task) {
- ret |= io_uring_try_cancel_iowq(ctx);
- } else if (tctx && tctx->io_wq) {
- /*
- * Cancels requests of all rings, not only @ctx, but
- * it's fine as the task is in exit/exec.
- */
- cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb,
- &cancel, true);
- ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
- }
-
- /* SQPOLL thread does its own polling */
- if ((!(ctx->flags & IORING_SETUP_SQPOLL) && cancel_all) ||
- (ctx->sq_data && ctx->sq_data->thread == current)) {
- while (!wq_list_empty(&ctx->iopoll_list)) {
- io_iopoll_try_reap_events(ctx);
- ret = true;
- }
- }
+ /* also protects from NULL ->cq_wait_arg as the size would be 0 */
+ if (unlikely(check_add_overflow(offset, size, &end) ||
+ end > ctx->cq_wait_size))
+ return ERR_PTR(-EFAULT);
- if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
- ret |= io_run_local_work(ctx) > 0;
- ret |= io_cancel_defer_files(ctx, task, cancel_all);
- mutex_lock(&ctx->uring_lock);
- ret |= io_poll_remove_all(ctx, task, cancel_all);
- mutex_unlock(&ctx->uring_lock);
- ret |= io_kill_timeouts(ctx, task, cancel_all);
- if (task)
- ret |= io_run_task_work() > 0;
- return ret;
+ offset = array_index_nospec(offset, ctx->cq_wait_size - size);
+ return ctx->cq_wait_arg + offset;
}
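
A minimal kernel-style sketch of the validation pattern used above (standalone, invented names): reject an offset window that wraps or runs past the registered wait-argument region, then clamp the offset against speculative out-of-bounds access.

#include <linux/overflow.h>
#include <linux/nospec.h>
#include <linux/types.h>

/* Check that [offset, offset + size) fits within a region of "limit" bytes
 * without the addition wrapping, and return a speculation-safe offset. */
static bool reg_wait_offset_ok(unsigned long offset, unsigned long size,
			       unsigned long limit, unsigned long *safe)
{
	unsigned long end;

	if (check_add_overflow(offset, size, &end) || end > limit)
		return false;
	*safe = array_index_nospec(offset, limit - size);
	return true;
}
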
-static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
+static int io_validate_ext_arg(struct io_ring_ctx *ctx, unsigned flags,
+ const void __user *argp, size_t argsz)
{
- if (tracked)
- return atomic_read(&tctx->inflight_tracked);
- return percpu_counter_sum(&tctx->inflight);
-}
-
-/*
- * Find any io_uring ctx that this task has registered or done IO on, and cancel
- * requests. @sqd should be not-null IFF it's an SQPOLL thread cancellation.
- */
-__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
-{
- struct io_uring_task *tctx = current->io_uring;
- struct io_ring_ctx *ctx;
- s64 inflight;
- DEFINE_WAIT(wait);
-
- WARN_ON_ONCE(sqd && sqd->thread != current);
-
- if (!current->io_uring)
- return;
- if (tctx->io_wq)
- io_wq_exit_start(tctx->io_wq);
-
- atomic_inc(&tctx->in_idle);
- do {
- bool loop = false;
-
- io_uring_drop_tctx_refs(current);
- /* read completions before cancelations */
- inflight = tctx_inflight(tctx, !cancel_all);
- if (!inflight)
- break;
-
- if (!sqd) {
- struct io_tctx_node *node;
- unsigned long index;
-
- xa_for_each(&tctx->xa, index, node) {
- /* sqpoll task will cancel all its requests */
- if (node->ctx->sq_data)
- continue;
- loop |= io_uring_try_cancel_requests(node->ctx,
- current, cancel_all);
- }
- } else {
- list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
- loop |= io_uring_try_cancel_requests(ctx,
- current,
- cancel_all);
- }
-
- if (loop) {
- cond_resched();
- continue;
- }
-
- prepare_to_wait(&tctx->wait, &wait, TASK_INTERRUPTIBLE);
- io_run_task_work();
- io_uring_drop_tctx_refs(current);
-
- /*
- * If we've seen completions, retry without waiting. This
- * avoids a race where a completion comes in before we did
- * prepare_to_wait().
- */
- if (inflight == tctx_inflight(tctx, !cancel_all))
- schedule();
- finish_wait(&tctx->wait, &wait);
- } while (1);
-
- io_uring_clean_tctx(tctx);
- if (cancel_all) {
- /*
- * We shouldn't run task_works after cancel, so just leave
- * ->in_idle set for normal exit.
- */
- atomic_dec(&tctx->in_idle);
- /* for exec all current's requests should be gone, kill tctx */
- __io_uring_free(current);
- }
-}
-
-void __io_uring_cancel(bool cancel_all)
-{
- io_uring_cancel_generic(cancel_all, NULL);
-}
-
-static void *io_uring_validate_mmap_request(struct file *file,
- loff_t pgoff, size_t sz)
-{
- struct io_ring_ctx *ctx = file->private_data;
- loff_t offset = pgoff << PAGE_SHIFT;
- struct page *page;
- void *ptr;
-
- switch (offset) {
- case IORING_OFF_SQ_RING:
- case IORING_OFF_CQ_RING:
- ptr = ctx->rings;
- break;
- case IORING_OFF_SQES:
- ptr = ctx->sq_sqes;
- break;
- default:
- return ERR_PTR(-EINVAL);
- }
-
- page = virt_to_head_page(ptr);
- if (sz > page_size(page))
- return ERR_PTR(-EINVAL);
-
- return ptr;
-}
-
-#ifdef CONFIG_MMU
-
-static __cold int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
-{
- size_t sz = vma->vm_end - vma->vm_start;
- unsigned long pfn;
- void *ptr;
-
- ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
- if (IS_ERR(ptr))
- return PTR_ERR(ptr);
-
- pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
- return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
-}
-
-#else /* !CONFIG_MMU */
-
-static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
-{
- return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL;
-}
-
-static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
-{
- return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
-}
-
-static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
- unsigned long addr, unsigned long len,
- unsigned long pgoff, unsigned long flags)
-{
- void *ptr;
-
- ptr = io_uring_validate_mmap_request(file, pgoff, len);
- if (IS_ERR(ptr))
- return PTR_ERR(ptr);
-
- return (unsigned long) ptr;
-}
-
-#endif /* !CONFIG_MMU */
-
-static int io_validate_ext_arg(unsigned flags, const void __user *argp, size_t argsz)
-{
- if (flags & IORING_ENTER_EXT_ARG) {
- struct io_uring_getevents_arg arg;
+ struct io_uring_getevents_arg arg;
- if (argsz != sizeof(arg))
- return -EINVAL;
- if (copy_from_user(&arg, argp, sizeof(arg)))
- return -EFAULT;
- }
+ if (!(flags & IORING_ENTER_EXT_ARG))
+ return 0;
+ if (flags & IORING_ENTER_EXT_ARG_REG)
+ return -EINVAL;
+ if (argsz != sizeof(arg))
+ return -EINVAL;
+ if (copy_from_user(&arg, argp, sizeof(arg)))
+ return -EFAULT;
return 0;
}
-static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz,
- struct __kernel_timespec __user **ts,
- const sigset_t __user **sig)
+static int io_get_ext_arg(struct io_ring_ctx *ctx, unsigned flags,
+ const void __user *argp, struct ext_arg *ext_arg)
{
+ const struct io_uring_getevents_arg __user *uarg = argp;
struct io_uring_getevents_arg arg;
+ ext_arg->iowait = !(flags & IORING_ENTER_NO_IOWAIT);
+
/*
* If EXT_ARG isn't set, then we have no timespec and the argp pointer
* is just a pointer to the sigset_t.
*/
if (!(flags & IORING_ENTER_EXT_ARG)) {
- *sig = (const sigset_t __user *) argp;
- *ts = NULL;
+ ext_arg->sig = (const sigset_t __user *) argp;
+ return 0;
+ }
+
+ if (flags & IORING_ENTER_EXT_ARG_REG) {
+ struct io_uring_reg_wait *w;
+
+ if (ext_arg->argsz != sizeof(struct io_uring_reg_wait))
+ return -EINVAL;
+ w = io_get_ext_arg_reg(ctx, argp);
+ if (IS_ERR(w))
+ return PTR_ERR(w);
+
+ if (w->flags & ~IORING_REG_WAIT_TS)
+ return -EINVAL;
+ ext_arg->min_time = READ_ONCE(w->min_wait_usec) * NSEC_PER_USEC;
+ ext_arg->sig = u64_to_user_ptr(READ_ONCE(w->sigmask));
+ ext_arg->argsz = READ_ONCE(w->sigmask_sz);
+ if (w->flags & IORING_REG_WAIT_TS) {
+ ext_arg->ts.tv_sec = READ_ONCE(w->ts.tv_sec);
+ ext_arg->ts.tv_nsec = READ_ONCE(w->ts.tv_nsec);
+ ext_arg->ts_set = true;
+ }
return 0;
}
@@ -3260,16 +3183,34 @@ static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz
* EXT_ARG is set - ensure we agree on the size of it and copy in our
* timespec and sigset_t pointers if good.
*/
- if (*argsz != sizeof(arg))
+ if (ext_arg->argsz != sizeof(arg))
return -EINVAL;
- if (copy_from_user(&arg, argp, sizeof(arg)))
+#ifdef CONFIG_64BIT
+ if (!user_access_begin(uarg, sizeof(*uarg)))
return -EFAULT;
- if (arg.pad)
- return -EINVAL;
- *sig = u64_to_user_ptr(arg.sigmask);
- *argsz = arg.sigmask_sz;
- *ts = u64_to_user_ptr(arg.ts);
+ unsafe_get_user(arg.sigmask, &uarg->sigmask, uaccess_end);
+ unsafe_get_user(arg.sigmask_sz, &uarg->sigmask_sz, uaccess_end);
+ unsafe_get_user(arg.min_wait_usec, &uarg->min_wait_usec, uaccess_end);
+ unsafe_get_user(arg.ts, &uarg->ts, uaccess_end);
+ user_access_end();
+#else
+ if (copy_from_user(&arg, uarg, sizeof(arg)))
+ return -EFAULT;
+#endif
+ ext_arg->min_time = arg.min_wait_usec * NSEC_PER_USEC;
+ ext_arg->sig = u64_to_user_ptr(arg.sigmask);
+ ext_arg->argsz = arg.sigmask_sz;
+ if (arg.ts) {
+ if (get_timespec64(&ext_arg->ts, u64_to_user_ptr(arg.ts)))
+ return -EFAULT;
+ ext_arg->ts_set = true;
+ }
return 0;
+#ifdef CONFIG_64BIT
+uaccess_end:
+ user_access_end();
+ return -EFAULT;
+#endif
}
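
A hedged userspace sketch (not part of this patch; the helper name and raw syscall use are assumptions): waiting for CQEs with a timeout through IORING_ENTER_EXT_ARG, matching the struct io_uring_getevents_arg layout that io_get_ext_arg() copies in above.

#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>
#include <linux/time_types.h>

/* Block until "want" CQEs are available or *ts expires. argsz must be
 * exactly sizeof(arg); anything else is rejected with -EINVAL. */
static long wait_cqes_with_timeout(int ring_fd, unsigned int want,
				   struct __kernel_timespec *ts)
{
	struct io_uring_getevents_arg arg = {
		.sigmask = 0,			/* no signal mask swap */
		.sigmask_sz = 0,
		.ts = (uint64_t)(uintptr_t)ts,
	};

	return syscall(__NR_io_uring_enter, ring_fd, 0, want,
		       IORING_ENTER_GETEVENTS | IORING_ENTER_EXT_ARG,
		       &arg, sizeof(arg));
}
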
SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
@@ -3277,12 +3218,10 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
size_t, argsz)
{
struct io_ring_ctx *ctx;
- struct fd f;
+ struct file *file;
long ret;
- if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
- IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG |
- IORING_ENTER_REGISTERED_RING)))
+ if (unlikely(flags & ~IORING_ENTER_FLAGS))
return -EINVAL;
/*
@@ -3295,20 +3234,19 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
if (unlikely(!tctx || fd >= IO_RINGFD_REG_MAX))
return -EINVAL;
fd = array_index_nospec(fd, IO_RINGFD_REG_MAX);
- f.file = tctx->registered_rings[fd];
- f.flags = 0;
- if (unlikely(!f.file))
+ file = tctx->registered_rings[fd];
+ if (unlikely(!file))
return -EBADF;
} else {
- f = fdget(fd);
- if (unlikely(!f.file))
+ file = fget(fd);
+ if (unlikely(!file))
return -EBADF;
ret = -EOPNOTSUPP;
- if (unlikely(!io_is_uring_fops(f.file)))
+ if (unlikely(!io_is_uring_fops(file)))
goto out;
}
- ctx = f.file->private_data;
+ ctx = file->private_data;
ret = -EBADFD;
if (unlikely(ctx->flags & IORING_SETUP_R_DISABLED))
goto out;
@@ -3320,19 +3258,15 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
*/
ret = 0;
if (ctx->flags & IORING_SETUP_SQPOLL) {
- io_cqring_overflow_flush(ctx);
-
if (unlikely(ctx->sq_data->thread == NULL)) {
ret = -EOWNERDEAD;
goto out;
}
if (flags & IORING_ENTER_SQ_WAKEUP)
wake_up(&ctx->sq_data->wait);
- if (flags & IORING_ENTER_SQ_WAIT) {
- ret = io_sqpoll_wait_sq(ctx);
- if (ret)
- goto out;
- }
+ if (flags & IORING_ENTER_SQ_WAIT)
+ io_sqpoll_wait_sq(ctx);
+
ret = to_submit;
} else if (to_submit) {
ret = io_uring_add_tctx_node(ctx);
@@ -3353,7 +3287,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
* it should handle ownership problems if any.
*/
if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
- (void)io_run_local_work_locked(ctx);
+ (void)io_run_local_work_locked(ctx, min_complete);
}
mutex_unlock(&ctx->uring_lock);
}
@@ -3370,24 +3304,17 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
*/
mutex_lock(&ctx->uring_lock);
iopoll_locked:
- ret2 = io_validate_ext_arg(flags, argp, argsz);
- if (likely(!ret2)) {
- min_complete = min(min_complete,
- ctx->cq_entries);
+ ret2 = io_validate_ext_arg(ctx, flags, argp, argsz);
+ if (likely(!ret2))
ret2 = io_iopoll_check(ctx, min_complete);
- }
mutex_unlock(&ctx->uring_lock);
} else {
- const sigset_t __user *sig;
- struct __kernel_timespec __user *ts;
-
- ret2 = io_get_ext_arg(flags, argp, &argsz, &ts, &sig);
- if (likely(!ret2)) {
- min_complete = min(min_complete,
- ctx->cq_entries);
- ret2 = io_cqring_wait(ctx, min_complete, sig,
- argsz, ts);
- }
+ struct ext_arg ext_arg = { .argsz = argsz };
+
+ ret2 = io_get_ext_arg(ctx, flags, argp, &ext_arg);
+ if (likely(!ret2))
+ ret2 = io_cqring_wait(ctx, min_complete, flags,
+ &ext_arg);
}
if (!ret) {
@@ -3404,15 +3331,16 @@ iopoll_locked:
}
}
out:
- fdput(f);
+ if (!(flags & IORING_ENTER_REGISTERED_RING))
+ fput(file);
return ret;
}
static const struct file_operations io_uring_fops = {
.release = io_uring_release,
.mmap = io_uring_mmap,
+ .get_unmapped_area = io_uring_get_unmapped_area,
#ifndef CONFIG_MMU
- .get_unmapped_area = io_uring_nommu_get_unmapped_area,
.mmap_capabilities = io_uring_nommu_mmap_capabilities,
#endif
.poll = io_uring_poll,
@@ -3427,63 +3355,59 @@ bool io_is_uring_fops(struct file *file)
}
static __cold int io_allocate_scq_urings(struct io_ring_ctx *ctx,
- struct io_uring_params *p)
+ struct io_ctx_config *config)
{
+ struct io_uring_params *p = &config->p;
+ struct io_rings_layout *rl = &config->layout;
+ struct io_uring_region_desc rd;
struct io_rings *rings;
- size_t size, sq_array_offset;
+ int ret;
/* make sure these are sane, as we already accounted them */
ctx->sq_entries = p->sq_entries;
ctx->cq_entries = p->cq_entries;
- size = rings_size(ctx, p->sq_entries, p->cq_entries, &sq_array_offset);
- if (size == SIZE_MAX)
- return -EOVERFLOW;
-
- rings = io_mem_alloc(size);
- if (!rings)
- return -ENOMEM;
-
- ctx->rings = rings;
- ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
- rings->sq_ring_mask = p->sq_entries - 1;
- rings->cq_ring_mask = p->cq_entries - 1;
- rings->sq_ring_entries = p->sq_entries;
- rings->cq_ring_entries = p->cq_entries;
-
- if (p->flags & IORING_SETUP_SQE128)
- size = array_size(2 * sizeof(struct io_uring_sqe), p->sq_entries);
- else
- size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
- if (size == SIZE_MAX) {
- io_mem_free(ctx->rings);
- ctx->rings = NULL;
- return -EOVERFLOW;
+ memset(&rd, 0, sizeof(rd));
+ rd.size = PAGE_ALIGN(rl->rings_size);
+ if (ctx->flags & IORING_SETUP_NO_MMAP) {
+ rd.user_addr = p->cq_off.user_addr;
+ rd.flags |= IORING_MEM_REGION_TYPE_USER;
}
+ ret = io_create_region(ctx, &ctx->ring_region, &rd, IORING_OFF_CQ_RING);
+ if (ret)
+ return ret;
+ ctx->rings = rings = io_region_get_ptr(&ctx->ring_region);
+ if (!(ctx->flags & IORING_SETUP_NO_SQARRAY))
+ ctx->sq_array = (u32 *)((char *)rings + rl->sq_array_offset);
- ctx->sq_sqes = io_mem_alloc(size);
- if (!ctx->sq_sqes) {
- io_mem_free(ctx->rings);
- ctx->rings = NULL;
- return -ENOMEM;
+ memset(&rd, 0, sizeof(rd));
+ rd.size = PAGE_ALIGN(rl->sq_size);
+ if (ctx->flags & IORING_SETUP_NO_MMAP) {
+ rd.user_addr = p->sq_off.user_addr;
+ rd.flags |= IORING_MEM_REGION_TYPE_USER;
+ }
+ ret = io_create_region(ctx, &ctx->sq_region, &rd, IORING_OFF_SQES);
+ if (ret) {
+ io_rings_free(ctx);
+ return ret;
}
+ ctx->sq_sqes = io_region_get_ptr(&ctx->sq_region);
+ memset(rings, 0, sizeof(*rings));
+ WRITE_ONCE(rings->sq_ring_mask, ctx->sq_entries - 1);
+ WRITE_ONCE(rings->cq_ring_mask, ctx->cq_entries - 1);
+ WRITE_ONCE(rings->sq_ring_entries, ctx->sq_entries);
+ WRITE_ONCE(rings->cq_ring_entries, ctx->cq_entries);
return 0;
}
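
A hedged userspace sketch of the IORING_SETUP_NO_MMAP path consumed above (not part of this patch): the application supplies the ring memory itself and passes the addresses in cq_off.user_addr (SQ/CQ rings) and sq_off.user_addr (SQE array). The helper name is invented, sizes are taken as parameters, and alignment/allocation requirements (some kernel versions want huge-page backed memory here) plus the need for new enough UAPI headers are glossed over.

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

/* Set up a ring whose SQ/CQ memory is provided by the application rather
 * than mmap()ed from the ring fd. Returns the ring fd or -1 on failure. */
static int setup_ring_no_mmap(unsigned int entries, size_t rings_bytes,
			      size_t sqes_bytes)
{
	struct io_uring_params p;
	void *rings, *sqes;

	rings = mmap(NULL, rings_bytes, PROT_READ | PROT_WRITE,
		     MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	sqes = mmap(NULL, sqes_bytes, PROT_READ | PROT_WRITE,
		    MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	if (rings == MAP_FAILED || sqes == MAP_FAILED)
		return -1;

	memset(&p, 0, sizeof(p));
	p.flags = IORING_SETUP_NO_MMAP;
	p.cq_off.user_addr = (uint64_t)(uintptr_t)rings;
	p.sq_off.user_addr = (uint64_t)(uintptr_t)sqes;
	return syscall(__NR_io_uring_setup, entries, &p);
}
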
-static int io_uring_install_fd(struct io_ring_ctx *ctx, struct file *file)
+static int io_uring_install_fd(struct file *file)
{
- int ret, fd;
+ int fd;
fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
if (fd < 0)
return fd;
-
- ret = __io_uring_add_tctx_node(ctx);
- if (ret) {
- put_unused_fd(fd);
- return ret;
- }
fd_install(fd, file);
return fd;
}
@@ -3491,40 +3415,74 @@ static int io_uring_install_fd(struct io_ring_ctx *ctx, struct file *file)
/*
 * Allocate an anonymous fd; this is what constitutes the application-
 * visible backing of an io_uring instance. The application mmaps this
- * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
- * we have to tie this fd to a socket for file garbage collection purposes.
+ * fd to gain access to the SQ/CQ ring details.
*/
static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
{
- struct file *file;
-#if defined(CONFIG_UNIX)
- int ret;
+ /* Create a new inode so that the LSM can block the creation. */
+ return anon_inode_create_getfile("[io_uring]", &io_uring_fops, ctx,
+ O_RDWR | O_CLOEXEC, NULL);
+}
- ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
- &ctx->ring_sock);
- if (ret)
- return ERR_PTR(ret);
-#endif
+static int io_uring_sanitise_params(struct io_uring_params *p)
+{
+ unsigned flags = p->flags;
- file = anon_inode_getfile_secure("[io_uring]", &io_uring_fops, ctx,
- O_RDWR | O_CLOEXEC, NULL);
-#if defined(CONFIG_UNIX)
- if (IS_ERR(file)) {
- sock_release(ctx->ring_sock);
- ctx->ring_sock = NULL;
- } else {
- ctx->ring_sock->file = file;
+ if (flags & ~IORING_SETUP_FLAGS)
+ return -EINVAL;
+
+ /* There is no way to mmap rings without a real fd */
+ if ((flags & IORING_SETUP_REGISTERED_FD_ONLY) &&
+ !(flags & IORING_SETUP_NO_MMAP))
+ return -EINVAL;
+
+ if (flags & IORING_SETUP_SQPOLL) {
+ /* IPI related flags don't make sense with SQPOLL */
+ if (flags & (IORING_SETUP_COOP_TASKRUN |
+ IORING_SETUP_TASKRUN_FLAG |
+ IORING_SETUP_DEFER_TASKRUN))
+ return -EINVAL;
}
-#endif
- return file;
+
+ if (flags & IORING_SETUP_TASKRUN_FLAG) {
+ if (!(flags & (IORING_SETUP_COOP_TASKRUN |
+ IORING_SETUP_DEFER_TASKRUN)))
+ return -EINVAL;
+ }
+
+ /* HYBRID_IOPOLL only valid with IOPOLL */
+ if ((flags & IORING_SETUP_HYBRID_IOPOLL) && !(flags & IORING_SETUP_IOPOLL))
+ return -EINVAL;
+
+ /*
+ * For DEFER_TASKRUN we require the completion task to be the same as
+ * the submission task. This implies that there is only one submitter.
+ */
+ if ((flags & IORING_SETUP_DEFER_TASKRUN) &&
+ !(flags & IORING_SETUP_SINGLE_ISSUER))
+ return -EINVAL;
+
+ /*
+	 * It's nonsensical to ask for both CQE32 and mixed CQE support;
+	 * posting 16b CQEs on a ring set up with CQE32 is not supported.
+ */
+ if ((flags & (IORING_SETUP_CQE32|IORING_SETUP_CQE_MIXED)) ==
+ (IORING_SETUP_CQE32|IORING_SETUP_CQE_MIXED))
+ return -EINVAL;
+ /*
+	 * It's nonsensical to ask for both SQE128 and mixed SQE support;
+	 * posting 64b SQEs on a ring set up with SQE128 is not supported.
+ */
+ if ((flags & (IORING_SETUP_SQE128|IORING_SETUP_SQE_MIXED)) ==
+ (IORING_SETUP_SQE128|IORING_SETUP_SQE_MIXED))
+ return -EINVAL;
+
+ return 0;
}
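
Illustrative userspace check (invented helper; not part of this patch): one flag combination that io_uring_sanitise_params() above rejects, since DEFER_TASKRUN requires SINGLE_ISSUER.

#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

/* Expected to fail with -EINVAL: DEFER_TASKRUN without SINGLE_ISSUER. */
static int try_defer_taskrun_alone(void)
{
	struct io_uring_params p;
	int fd;

	memset(&p, 0, sizeof(p));
	p.flags = IORING_SETUP_DEFER_TASKRUN;
	fd = syscall(__NR_io_uring_setup, 8, &p);
	if (fd >= 0) {
		close(fd);
		return 0;
	}
	return -errno;
}
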
-static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
- struct io_uring_params __user *params)
+static int io_uring_fill_params(struct io_uring_params *p)
{
- struct io_ring_ctx *ctx;
- struct file *file;
- int ret;
+ unsigned entries = p->sq_entries;
if (!entries)
return -EINVAL;
@@ -3563,15 +3521,90 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
p->cq_entries = 2 * p->sq_entries;
}
+ return 0;
+}
+
+int io_prepare_config(struct io_ctx_config *config)
+{
+ struct io_uring_params *p = &config->p;
+ int ret;
+
+ ret = io_uring_sanitise_params(p);
+ if (ret)
+ return ret;
+
+ ret = io_uring_fill_params(p);
+ if (ret)
+ return ret;
+
+ ret = rings_size(p->flags, p->sq_entries, p->cq_entries,
+ &config->layout);
+ if (ret)
+ return ret;
+
+ p->sq_off.head = offsetof(struct io_rings, sq.head);
+ p->sq_off.tail = offsetof(struct io_rings, sq.tail);
+ p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
+ p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
+ p->sq_off.flags = offsetof(struct io_rings, sq_flags);
+ p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
+ p->sq_off.resv1 = 0;
+ if (!(p->flags & IORING_SETUP_NO_MMAP))
+ p->sq_off.user_addr = 0;
+
+ p->cq_off.head = offsetof(struct io_rings, cq.head);
+ p->cq_off.tail = offsetof(struct io_rings, cq.tail);
+ p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
+ p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
+ p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
+ p->cq_off.cqes = offsetof(struct io_rings, cqes);
+ p->cq_off.flags = offsetof(struct io_rings, cq_flags);
+ p->cq_off.resv1 = 0;
+ if (!(p->flags & IORING_SETUP_NO_MMAP))
+ p->cq_off.user_addr = 0;
+ if (!(p->flags & IORING_SETUP_NO_SQARRAY))
+ p->sq_off.array = config->layout.sq_array_offset;
+
+ return 0;
+}
+
+static __cold int io_uring_create(struct io_ctx_config *config)
+{
+ struct io_uring_params *p = &config->p;
+ struct io_ring_ctx *ctx;
+ struct io_uring_task *tctx;
+ struct file *file;
+ int ret;
+
+ ret = io_prepare_config(config);
+ if (ret)
+ return ret;
+
ctx = io_ring_ctx_alloc(p);
if (!ctx)
return -ENOMEM;
+ ctx->clockid = CLOCK_MONOTONIC;
+ ctx->clock_offset = 0;
+
+ if (!(ctx->flags & IORING_SETUP_NO_SQARRAY))
+ static_branch_inc(&io_key_has_sqarray);
+
if ((ctx->flags & IORING_SETUP_DEFER_TASKRUN) &&
!(ctx->flags & IORING_SETUP_IOPOLL) &&
!(ctx->flags & IORING_SETUP_SQPOLL))
ctx->task_complete = true;
+ if (ctx->task_complete || (ctx->flags & IORING_SETUP_IOPOLL))
+ ctx->lockless_cq = true;
+
+ /*
+	 * Lazy poll_wq activation relies on ->task_complete for
+	 * synchronisation purposes; see io_activate_pollwq().
+ */
+ if (!ctx->task_complete)
+ ctx->poll_activated = true;
+
/*
* When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, user
* space applications don't need to do io completion events
@@ -3583,39 +3616,17 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
ctx->syscall_iopoll = 1;
ctx->compat = in_compat_syscall();
- if (!capable(CAP_IPC_LOCK))
+ if (!ns_capable_noaudit(&init_user_ns, CAP_IPC_LOCK))
ctx->user = get_uid(current_user());
/*
* For SQPOLL, we just need a wakeup, always. For !SQPOLL, if
* COOP_TASKRUN is set, then IPIs are never needed by the app.
*/
- ret = -EINVAL;
- if (ctx->flags & IORING_SETUP_SQPOLL) {
- /* IPI related flags don't make sense with SQPOLL */
- if (ctx->flags & (IORING_SETUP_COOP_TASKRUN |
- IORING_SETUP_TASKRUN_FLAG |
- IORING_SETUP_DEFER_TASKRUN))
- goto err;
+ if (ctx->flags & (IORING_SETUP_SQPOLL|IORING_SETUP_COOP_TASKRUN))
ctx->notify_method = TWA_SIGNAL_NO_IPI;
- } else if (ctx->flags & IORING_SETUP_COOP_TASKRUN) {
- ctx->notify_method = TWA_SIGNAL_NO_IPI;
- } else {
- if (ctx->flags & IORING_SETUP_TASKRUN_FLAG &&
- !(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
- goto err;
+ else
ctx->notify_method = TWA_SIGNAL;
- }
-
- /*
- * For DEFER_TASKRUN we require the completion task to be the same as the
- * submission task. This implies that there is only one submitter, so enforce
- * that.
- */
- if (ctx->flags & IORING_SETUP_DEFER_TASKRUN &&
- !(ctx->flags & IORING_SETUP_SINGLE_ISSUER)) {
- goto err;
- }
/*
* This is just grabbed for accounting purposes. When a process exits,
@@ -3626,53 +3637,29 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
mmgrab(current->mm);
ctx->mm_account = current->mm;
- ret = io_allocate_scq_urings(ctx, p);
+ ret = io_allocate_scq_urings(ctx, config);
if (ret)
goto err;
ret = io_sq_offload_create(ctx, p);
if (ret)
goto err;
- /* always set a rsrc node */
- ret = io_rsrc_node_switch_start(ctx);
- if (ret)
- goto err;
- io_rsrc_node_switch(ctx, NULL);
- memset(&p->sq_off, 0, sizeof(p->sq_off));
- p->sq_off.head = offsetof(struct io_rings, sq.head);
- p->sq_off.tail = offsetof(struct io_rings, sq.tail);
- p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
- p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
- p->sq_off.flags = offsetof(struct io_rings, sq_flags);
- p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
- p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
+ p->features = IORING_FEAT_FLAGS;
- memset(&p->cq_off, 0, sizeof(p->cq_off));
- p->cq_off.head = offsetof(struct io_rings, cq.head);
- p->cq_off.tail = offsetof(struct io_rings, cq.tail);
- p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
- p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
- p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
- p->cq_off.cqes = offsetof(struct io_rings, cqes);
- p->cq_off.flags = offsetof(struct io_rings, cq_flags);
-
- p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
- IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
- IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
- IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
- IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS |
- IORING_FEAT_RSRC_TAGS | IORING_FEAT_CQE_SKIP |
- IORING_FEAT_LINKED_FILE;
-
- if (copy_to_user(params, p, sizeof(*p))) {
+ if (copy_to_user(config->uptr, p, sizeof(*p))) {
ret = -EFAULT;
goto err;
}
if (ctx->flags & IORING_SETUP_SINGLE_ISSUER
- && !(ctx->flags & IORING_SETUP_R_DISABLED))
- WRITE_ONCE(ctx->submitter_task, get_task_struct(current));
+ && !(ctx->flags & IORING_SETUP_R_DISABLED)) {
+ /*
+ * Unlike io_register_enable_rings(), don't need WRITE_ONCE()
+ * since ctx isn't yet accessible from other tasks
+ */
+ ctx->submitter_task = get_task_struct(current);
+ }
file = io_uring_get_file(ctx);
if (IS_ERR(file)) {
@@ -3680,22 +3667,30 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
goto err;
}
+ ret = __io_uring_add_tctx_node(ctx);
+ if (ret)
+ goto err_fput;
+ tctx = current->io_uring;
+
/*
* Install ring fd as the very last thing, so we don't risk someone
* having closed it before we finish setup
*/
- ret = io_uring_install_fd(ctx, file);
- if (ret < 0) {
- /* fput will clean it up */
- fput(file);
- return ret;
- }
+ if (p->flags & IORING_SETUP_REGISTERED_FD_ONLY)
+ ret = io_ring_add_registered_file(tctx, file, 0, IO_RINGFD_REG_MAX);
+ else
+ ret = io_uring_install_fd(file);
+ if (ret < 0)
+ goto err_fput;
trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
return ret;
err:
io_ring_ctx_wait_and_kill(ctx);
return ret;
+err_fput:
+ fput(file);
+ return ret;
}
/*
@@ -3705,503 +3700,64 @@ err:
*/
static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
{
- struct io_uring_params p;
- int i;
+ struct io_ctx_config config;
- if (copy_from_user(&p, params, sizeof(p)))
- return -EFAULT;
- for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
- if (p.resv[i])
- return -EINVAL;
- }
+ memset(&config, 0, sizeof(config));
- if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
- IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
- IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ |
- IORING_SETUP_R_DISABLED | IORING_SETUP_SUBMIT_ALL |
- IORING_SETUP_COOP_TASKRUN | IORING_SETUP_TASKRUN_FLAG |
- IORING_SETUP_SQE128 | IORING_SETUP_CQE32 |
- IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN))
- return -EINVAL;
-
- return io_uring_create(entries, &p, params);
-}
-
-SYSCALL_DEFINE2(io_uring_setup, u32, entries,
- struct io_uring_params __user *, params)
-{
- return io_uring_setup(entries, params);
-}
-
-static __cold int io_probe(struct io_ring_ctx *ctx, void __user *arg,
- unsigned nr_args)
-{
- struct io_uring_probe *p;
- size_t size;
- int i, ret;
-
- size = struct_size(p, ops, nr_args);
- if (size == SIZE_MAX)
- return -EOVERFLOW;
- p = kzalloc(size, GFP_KERNEL);
- if (!p)
- return -ENOMEM;
-
- ret = -EFAULT;
- if (copy_from_user(p, arg, size))
- goto out;
- ret = -EINVAL;
- if (memchr_inv(p, 0, size))
- goto out;
-
- p->last_op = IORING_OP_LAST - 1;
- if (nr_args > IORING_OP_LAST)
- nr_args = IORING_OP_LAST;
-
- for (i = 0; i < nr_args; i++) {
- p->ops[i].op = i;
- if (!io_op_defs[i].not_supported)
- p->ops[i].flags = IO_URING_OP_SUPPORTED;
- }
- p->ops_len = i;
-
- ret = 0;
- if (copy_to_user(arg, p, size))
- ret = -EFAULT;
-out:
- kfree(p);
- return ret;
-}
-
-static int io_register_personality(struct io_ring_ctx *ctx)
-{
- const struct cred *creds;
- u32 id;
- int ret;
-
- creds = get_current_cred();
-
- ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)creds,
- XA_LIMIT(0, USHRT_MAX), &ctx->pers_next, GFP_KERNEL);
- if (ret < 0) {
- put_cred(creds);
- return ret;
- }
- return id;
-}
-
-static __cold int io_register_restrictions(struct io_ring_ctx *ctx,
- void __user *arg, unsigned int nr_args)
-{
- struct io_uring_restriction *res;
- size_t size;
- int i, ret;
-
- /* Restrictions allowed only if rings started disabled */
- if (!(ctx->flags & IORING_SETUP_R_DISABLED))
- return -EBADFD;
-
- /* We allow only a single restrictions registration */
- if (ctx->restrictions.registered)
- return -EBUSY;
-
- if (!arg || nr_args > IORING_MAX_RESTRICTIONS)
- return -EINVAL;
-
- size = array_size(nr_args, sizeof(*res));
- if (size == SIZE_MAX)
- return -EOVERFLOW;
-
- res = memdup_user(arg, size);
- if (IS_ERR(res))
- return PTR_ERR(res);
-
- ret = 0;
-
- for (i = 0; i < nr_args; i++) {
- switch (res[i].opcode) {
- case IORING_RESTRICTION_REGISTER_OP:
- if (res[i].register_op >= IORING_REGISTER_LAST) {
- ret = -EINVAL;
- goto out;
- }
-
- __set_bit(res[i].register_op,
- ctx->restrictions.register_op);
- break;
- case IORING_RESTRICTION_SQE_OP:
- if (res[i].sqe_op >= IORING_OP_LAST) {
- ret = -EINVAL;
- goto out;
- }
-
- __set_bit(res[i].sqe_op, ctx->restrictions.sqe_op);
- break;
- case IORING_RESTRICTION_SQE_FLAGS_ALLOWED:
- ctx->restrictions.sqe_flags_allowed = res[i].sqe_flags;
- break;
- case IORING_RESTRICTION_SQE_FLAGS_REQUIRED:
- ctx->restrictions.sqe_flags_required = res[i].sqe_flags;
- break;
- default:
- ret = -EINVAL;
- goto out;
- }
- }
-
-out:
- /* Reset all restrictions if an error happened */
- if (ret != 0)
- memset(&ctx->restrictions, 0, sizeof(ctx->restrictions));
- else
- ctx->restrictions.registered = true;
-
- kfree(res);
- return ret;
-}
-
-static int io_register_enable_rings(struct io_ring_ctx *ctx)
-{
- if (!(ctx->flags & IORING_SETUP_R_DISABLED))
- return -EBADFD;
-
- if (ctx->flags & IORING_SETUP_SINGLE_ISSUER && !ctx->submitter_task)
- WRITE_ONCE(ctx->submitter_task, get_task_struct(current));
-
- if (ctx->restrictions.registered)
- ctx->restricted = 1;
-
- ctx->flags &= ~IORING_SETUP_R_DISABLED;
- if (ctx->sq_data && wq_has_sleeper(&ctx->sq_data->wait))
- wake_up(&ctx->sq_data->wait);
- return 0;
-}
-
-static __cold int io_register_iowq_aff(struct io_ring_ctx *ctx,
- void __user *arg, unsigned len)
-{
- struct io_uring_task *tctx = current->io_uring;
- cpumask_var_t new_mask;
- int ret;
-
- if (!tctx || !tctx->io_wq)
- return -EINVAL;
-
- if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
- return -ENOMEM;
-
- cpumask_clear(new_mask);
- if (len > cpumask_size())
- len = cpumask_size();
-
- if (in_compat_syscall()) {
- ret = compat_get_bitmap(cpumask_bits(new_mask),
- (const compat_ulong_t __user *)arg,
- len * 8 /* CHAR_BIT */);
- } else {
- ret = copy_from_user(new_mask, arg, len);
- }
-
- if (ret) {
- free_cpumask_var(new_mask);
+ if (copy_from_user(&config.p, params, sizeof(config.p)))
return -EFAULT;
- }
- ret = io_wq_cpu_affinity(tctx->io_wq, new_mask);
- free_cpumask_var(new_mask);
- return ret;
-}
-
-static __cold int io_unregister_iowq_aff(struct io_ring_ctx *ctx)
-{
- struct io_uring_task *tctx = current->io_uring;
-
- if (!tctx || !tctx->io_wq)
+ if (!mem_is_zero(&config.p.resv, sizeof(config.p.resv)))
return -EINVAL;
- return io_wq_cpu_affinity(tctx->io_wq, NULL);
+ config.p.sq_entries = entries;
+ config.uptr = params;
+ return io_uring_create(&config);
}
-static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
- void __user *arg)
- __must_hold(&ctx->uring_lock)
+static inline int io_uring_allowed(void)
{
- struct io_tctx_node *node;
- struct io_uring_task *tctx = NULL;
- struct io_sq_data *sqd = NULL;
- __u32 new_count[2];
- int i, ret;
-
- if (copy_from_user(new_count, arg, sizeof(new_count)))
- return -EFAULT;
- for (i = 0; i < ARRAY_SIZE(new_count); i++)
- if (new_count[i] > INT_MAX)
- return -EINVAL;
-
- if (ctx->flags & IORING_SETUP_SQPOLL) {
- sqd = ctx->sq_data;
- if (sqd) {
- /*
- * Observe the correct sqd->lock -> ctx->uring_lock
- * ordering. Fine to drop uring_lock here, we hold
- * a ref to the ctx.
- */
- refcount_inc(&sqd->refs);
- mutex_unlock(&ctx->uring_lock);
- mutex_lock(&sqd->lock);
- mutex_lock(&ctx->uring_lock);
- if (sqd->thread)
- tctx = sqd->thread->io_uring;
- }
- } else {
- tctx = current->io_uring;
- }
-
- BUILD_BUG_ON(sizeof(new_count) != sizeof(ctx->iowq_limits));
+ int disabled = READ_ONCE(sysctl_io_uring_disabled);
+ kgid_t io_uring_group;
- for (i = 0; i < ARRAY_SIZE(new_count); i++)
- if (new_count[i])
- ctx->iowq_limits[i] = new_count[i];
- ctx->iowq_limits_set = true;
-
- if (tctx && tctx->io_wq) {
- ret = io_wq_max_workers(tctx->io_wq, new_count);
- if (ret)
- goto err;
- } else {
- memset(new_count, 0, sizeof(new_count));
- }
-
- if (sqd) {
- mutex_unlock(&sqd->lock);
- io_put_sq_data(sqd);
- }
-
- if (copy_to_user(arg, new_count, sizeof(new_count)))
- return -EFAULT;
+ if (disabled == 2)
+ return -EPERM;
- /* that's it for SQPOLL, only the SQPOLL task creates requests */
- if (sqd)
- return 0;
+ if (disabled == 0 || capable(CAP_SYS_ADMIN))
+ goto allowed_lsm;
- /* now propagate the restriction to all registered users */
- list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
- struct io_uring_task *tctx = node->task->io_uring;
+ io_uring_group = make_kgid(&init_user_ns, sysctl_io_uring_group);
+ if (!gid_valid(io_uring_group))
+ return -EPERM;
- if (WARN_ON_ONCE(!tctx->io_wq))
- continue;
+ if (!in_group_p(io_uring_group))
+ return -EPERM;
- for (i = 0; i < ARRAY_SIZE(new_count); i++)
- new_count[i] = ctx->iowq_limits[i];
- /* ignore errors, it always returns zero anyway */
- (void)io_wq_max_workers(tctx->io_wq, new_count);
- }
- return 0;
-err:
- if (sqd) {
- mutex_unlock(&sqd->lock);
- io_put_sq_data(sqd);
- }
- return ret;
+allowed_lsm:
+ return security_uring_allowed();
}
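
A small userspace sketch (invented helper; not part of this patch) of how the kernel.io_uring_disabled / kernel.io_uring_group policy enforced by io_uring_allowed() appears to applications: setup fails with EPERM when the calling task isn't permitted.

#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

/* Returns 1 if io_uring is usable, 0 if blocked by policy (EPERM),
 * or a negative errno for any other failure. */
static int io_uring_permitted(void)
{
	struct io_uring_params p;
	int fd;

	memset(&p, 0, sizeof(p));
	fd = syscall(__NR_io_uring_setup, 1, &p);
	if (fd >= 0) {
		close(fd);
		return 1;
	}
	return errno == EPERM ? 0 : -errno;
}
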
-static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
- void __user *arg, unsigned nr_args)
- __releases(ctx->uring_lock)
- __acquires(ctx->uring_lock)
+SYSCALL_DEFINE2(io_uring_setup, u32, entries,
+ struct io_uring_params __user *, params)
{
int ret;
- /*
- * We don't quiesce the refs for register anymore and so it can't be
- * dying as we're holding a file ref here.
- */
- if (WARN_ON_ONCE(percpu_ref_is_dying(&ctx->refs)))
- return -ENXIO;
-
- if (ctx->submitter_task && ctx->submitter_task != current)
- return -EEXIST;
-
- if (ctx->restricted) {
- opcode = array_index_nospec(opcode, IORING_REGISTER_LAST);
- if (!test_bit(opcode, ctx->restrictions.register_op))
- return -EACCES;
- }
-
- switch (opcode) {
- case IORING_REGISTER_BUFFERS:
- ret = -EFAULT;
- if (!arg)
- break;
- ret = io_sqe_buffers_register(ctx, arg, nr_args, NULL);
- break;
- case IORING_UNREGISTER_BUFFERS:
- ret = -EINVAL;
- if (arg || nr_args)
- break;
- ret = io_sqe_buffers_unregister(ctx);
- break;
- case IORING_REGISTER_FILES:
- ret = -EFAULT;
- if (!arg)
- break;
- ret = io_sqe_files_register(ctx, arg, nr_args, NULL);
- break;
- case IORING_UNREGISTER_FILES:
- ret = -EINVAL;
- if (arg || nr_args)
- break;
- ret = io_sqe_files_unregister(ctx);
- break;
- case IORING_REGISTER_FILES_UPDATE:
- ret = io_register_files_update(ctx, arg, nr_args);
- break;
- case IORING_REGISTER_EVENTFD:
- ret = -EINVAL;
- if (nr_args != 1)
- break;
- ret = io_eventfd_register(ctx, arg, 0);
- break;
- case IORING_REGISTER_EVENTFD_ASYNC:
- ret = -EINVAL;
- if (nr_args != 1)
- break;
- ret = io_eventfd_register(ctx, arg, 1);
- break;
- case IORING_UNREGISTER_EVENTFD:
- ret = -EINVAL;
- if (arg || nr_args)
- break;
- ret = io_eventfd_unregister(ctx);
- break;
- case IORING_REGISTER_PROBE:
- ret = -EINVAL;
- if (!arg || nr_args > 256)
- break;
- ret = io_probe(ctx, arg, nr_args);
- break;
- case IORING_REGISTER_PERSONALITY:
- ret = -EINVAL;
- if (arg || nr_args)
- break;
- ret = io_register_personality(ctx);
- break;
- case IORING_UNREGISTER_PERSONALITY:
- ret = -EINVAL;
- if (arg)
- break;
- ret = io_unregister_personality(ctx, nr_args);
- break;
- case IORING_REGISTER_ENABLE_RINGS:
- ret = -EINVAL;
- if (arg || nr_args)
- break;
- ret = io_register_enable_rings(ctx);
- break;
- case IORING_REGISTER_RESTRICTIONS:
- ret = io_register_restrictions(ctx, arg, nr_args);
- break;
- case IORING_REGISTER_FILES2:
- ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_FILE);
- break;
- case IORING_REGISTER_FILES_UPDATE2:
- ret = io_register_rsrc_update(ctx, arg, nr_args,
- IORING_RSRC_FILE);
- break;
- case IORING_REGISTER_BUFFERS2:
- ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_BUFFER);
- break;
- case IORING_REGISTER_BUFFERS_UPDATE:
- ret = io_register_rsrc_update(ctx, arg, nr_args,
- IORING_RSRC_BUFFER);
- break;
- case IORING_REGISTER_IOWQ_AFF:
- ret = -EINVAL;
- if (!arg || !nr_args)
- break;
- ret = io_register_iowq_aff(ctx, arg, nr_args);
- break;
- case IORING_UNREGISTER_IOWQ_AFF:
- ret = -EINVAL;
- if (arg || nr_args)
- break;
- ret = io_unregister_iowq_aff(ctx);
- break;
- case IORING_REGISTER_IOWQ_MAX_WORKERS:
- ret = -EINVAL;
- if (!arg || nr_args != 2)
- break;
- ret = io_register_iowq_max_workers(ctx, arg);
- break;
- case IORING_REGISTER_RING_FDS:
- ret = io_ringfd_register(ctx, arg, nr_args);
- break;
- case IORING_UNREGISTER_RING_FDS:
- ret = io_ringfd_unregister(ctx, arg, nr_args);
- break;
- case IORING_REGISTER_PBUF_RING:
- ret = -EINVAL;
- if (!arg || nr_args != 1)
- break;
- ret = io_register_pbuf_ring(ctx, arg);
- break;
- case IORING_UNREGISTER_PBUF_RING:
- ret = -EINVAL;
- if (!arg || nr_args != 1)
- break;
- ret = io_unregister_pbuf_ring(ctx, arg);
- break;
- case IORING_REGISTER_SYNC_CANCEL:
- ret = -EINVAL;
- if (!arg || nr_args != 1)
- break;
- ret = io_sync_cancel(ctx, arg);
- break;
- case IORING_REGISTER_FILE_ALLOC_RANGE:
- ret = -EINVAL;
- if (!arg || nr_args)
- break;
- ret = io_register_file_alloc_range(ctx, arg);
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
- void __user *, arg, unsigned int, nr_args)
-{
- struct io_ring_ctx *ctx;
- long ret = -EBADF;
- struct fd f;
-
- if (opcode >= IORING_REGISTER_LAST)
- return -EINVAL;
-
- f = fdget(fd);
- if (!f.file)
- return -EBADF;
-
- ret = -EOPNOTSUPP;
- if (!io_is_uring_fops(f.file))
- goto out_fput;
-
- ctx = f.file->private_data;
+ ret = io_uring_allowed();
+ if (ret)
+ return ret;
- mutex_lock(&ctx->uring_lock);
- ret = __io_uring_register(ctx, opcode, arg, nr_args);
- mutex_unlock(&ctx->uring_lock);
- trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs, ret);
-out_fput:
- fdput(f);
- return ret;
+ return io_uring_setup(entries, params);
}
static int __init io_uring_init(void)
{
+ struct kmem_cache_args kmem_args = {
+ .useroffset = offsetof(struct io_kiocb, cmd.data),
+ .usersize = sizeof_field(struct io_kiocb, cmd.data),
+ .freeptr_offset = offsetof(struct io_kiocb, work),
+ .use_freeptr_offset = true,
+ };
+
#define __BUILD_BUG_VERIFY_OFFSET_SIZE(stype, eoffset, esize, ename) do { \
BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
BUILD_BUG_ON(sizeof_field(stype, ename) != esize); \
@@ -4250,9 +3806,13 @@ static int __init io_uring_init(void)
BUILD_BUG_SQE_ELEM(44, __s32, splice_fd_in);
BUILD_BUG_SQE_ELEM(44, __u32, file_index);
BUILD_BUG_SQE_ELEM(44, __u16, addr_len);
+ BUILD_BUG_SQE_ELEM(44, __u8, write_stream);
+ BUILD_BUG_SQE_ELEM(45, __u8, __pad4[0]);
BUILD_BUG_SQE_ELEM(46, __u16, __pad3[0]);
BUILD_BUG_SQE_ELEM(48, __u64, addr3);
BUILD_BUG_SQE_ELEM_SIZE(48, 0, cmd);
+ BUILD_BUG_SQE_ELEM(48, __u64, attr_ptr);
+ BUILD_BUG_SQE_ELEM(56, __u64, attr_type_mask);
BUILD_BUG_SQE_ELEM(56, __u64, __pad2);
BUILD_BUG_ON(sizeof(struct io_uring_files_update) !=
@@ -4270,14 +3830,36 @@ static int __init io_uring_init(void)
BUILD_BUG_ON(SQE_COMMON_FLAGS >= (1 << 8));
BUILD_BUG_ON((SQE_VALID_FLAGS | SQE_COMMON_FLAGS) != SQE_VALID_FLAGS);
- BUILD_BUG_ON(__REQ_F_LAST_BIT > 8 * sizeof(int));
+ BUILD_BUG_ON(__REQ_F_LAST_BIT > 8 * sizeof_field(struct io_kiocb, flags));
BUILD_BUG_ON(sizeof(atomic_t) != sizeof(u32));
+	/* top 8 bits are for internal use */
+ BUILD_BUG_ON((IORING_URING_CMD_MASK & 0xff000000) != 0);
+
io_uring_optable_init();
- req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC |
- SLAB_ACCOUNT);
+ /* imu->dir is u8 */
+ BUILD_BUG_ON((IO_IMU_DEST | IO_IMU_SOURCE) > U8_MAX);
+
+ /*
+	 * Allow user copy in the per-command field, which starts after the
+	 * file member of io_kiocb and runs up to the opcode field. The
+	 * openat2 handling requires copying user memory into the io_kiocb
+	 * object in that range, and HARDENED_USERCOPY will complain if we
+	 * haven't correctly annotated this range.
+ */
+ req_cachep = kmem_cache_create("io_kiocb", sizeof(struct io_kiocb), &kmem_args,
+ SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT |
+ SLAB_TYPESAFE_BY_RCU);
+
+ iou_wq = alloc_workqueue("iou_exit", WQ_UNBOUND, 64);
+ BUG_ON(!iou_wq);
+
+#ifdef CONFIG_SYSCTL
+ register_sysctl_init("kernel", kernel_io_uring_disabled_table);
+#endif
+
return 0;
};
__initcall(io_uring_init);
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index ab4b2a1c3b7e..a790c16854d3 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -3,169 +3,305 @@
#include <linux/errno.h>
#include <linux/lockdep.h>
+#include <linux/resume_user_mode.h>
+#include <linux/kasan.h>
+#include <linux/poll.h>
#include <linux/io_uring_types.h>
#include <uapi/linux/eventpoll.h>
+#include "alloc_cache.h"
#include "io-wq.h"
#include "slist.h"
-#include "filetable.h"
+#include "opdef.h"
#ifndef CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
#endif
+struct io_rings_layout {
+ /* size of CQ + headers + SQ offset array */
+ size_t rings_size;
+ size_t sq_size;
+
+ size_t sq_array_offset;
+};
+
+struct io_ctx_config {
+ struct io_uring_params p;
+ struct io_rings_layout layout;
+ struct io_uring_params __user *uptr;
+};
+
+#define IORING_FEAT_FLAGS (IORING_FEAT_SINGLE_MMAP |\
+ IORING_FEAT_NODROP |\
+ IORING_FEAT_SUBMIT_STABLE |\
+ IORING_FEAT_RW_CUR_POS |\
+ IORING_FEAT_CUR_PERSONALITY |\
+ IORING_FEAT_FAST_POLL |\
+ IORING_FEAT_POLL_32BITS |\
+ IORING_FEAT_SQPOLL_NONFIXED |\
+ IORING_FEAT_EXT_ARG |\
+ IORING_FEAT_NATIVE_WORKERS |\
+ IORING_FEAT_RSRC_TAGS |\
+ IORING_FEAT_CQE_SKIP |\
+ IORING_FEAT_LINKED_FILE |\
+ IORING_FEAT_REG_REG_RING |\
+ IORING_FEAT_RECVSEND_BUNDLE |\
+ IORING_FEAT_MIN_TIMEOUT |\
+ IORING_FEAT_RW_ATTR |\
+ IORING_FEAT_NO_IOWAIT)
+
+#define IORING_SETUP_FLAGS (IORING_SETUP_IOPOLL |\
+ IORING_SETUP_SQPOLL |\
+ IORING_SETUP_SQ_AFF |\
+ IORING_SETUP_CQSIZE |\
+ IORING_SETUP_CLAMP |\
+ IORING_SETUP_ATTACH_WQ |\
+ IORING_SETUP_R_DISABLED |\
+ IORING_SETUP_SUBMIT_ALL |\
+ IORING_SETUP_COOP_TASKRUN |\
+ IORING_SETUP_TASKRUN_FLAG |\
+ IORING_SETUP_SQE128 |\
+ IORING_SETUP_CQE32 |\
+ IORING_SETUP_SINGLE_ISSUER |\
+ IORING_SETUP_DEFER_TASKRUN |\
+ IORING_SETUP_NO_MMAP |\
+ IORING_SETUP_REGISTERED_FD_ONLY |\
+ IORING_SETUP_NO_SQARRAY |\
+ IORING_SETUP_HYBRID_IOPOLL |\
+ IORING_SETUP_CQE_MIXED |\
+ IORING_SETUP_SQE_MIXED)
+
+#define IORING_ENTER_FLAGS (IORING_ENTER_GETEVENTS |\
+ IORING_ENTER_SQ_WAKEUP |\
+ IORING_ENTER_SQ_WAIT |\
+ IORING_ENTER_EXT_ARG |\
+ IORING_ENTER_REGISTERED_RING |\
+ IORING_ENTER_ABS_TIMER |\
+ IORING_ENTER_EXT_ARG_REG |\
+ IORING_ENTER_NO_IOWAIT)
+
+
+#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE |\
+ IOSQE_IO_DRAIN |\
+ IOSQE_IO_LINK |\
+ IOSQE_IO_HARDLINK |\
+ IOSQE_ASYNC |\
+ IOSQE_BUFFER_SELECT |\
+ IOSQE_CQE_SKIP_SUCCESS)
+
enum {
- IOU_OK = 0,
+ IOU_COMPLETE = 0,
+
IOU_ISSUE_SKIP_COMPLETE = -EIOCBQUEUED,
/*
- * Intended only when both IO_URING_F_MULTISHOT is passed
- * to indicate to the poll runner that multishot should be
- * removed and the result is set on req->cqe.res.
+ * The request has more work to do and should be retried. io_uring will
+ * attempt to wait on the file for eligible opcodes, but otherwise
+ * it'll be handed to iowq for blocking execution. It works for normal
+	 * requests as well as for multishot mode.
+ */
+ IOU_RETRY = -EAGAIN,
+
+ /*
+ * Requeue the task_work to restart operations on this request. The
+	 * actual value isn't important; it just shouldn't be an otherwise
+	 * valid error code, yet less than -MAX_ERRNO and valid internally.
*/
- IOU_STOP_MULTISHOT = -ECANCELED,
+ IOU_REQUEUE = -3072,
};
-struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx, bool overflow);
-bool io_req_cqe_overflow(struct io_kiocb *req);
+struct io_defer_entry {
+ struct list_head list;
+ struct io_kiocb *req;
+};
+
+struct io_wait_queue {
+ struct wait_queue_entry wq;
+ struct io_ring_ctx *ctx;
+ unsigned cq_tail;
+ unsigned cq_min_tail;
+ unsigned nr_timeouts;
+ int hit_timeout;
+ ktime_t min_timeout;
+ ktime_t timeout;
+ struct hrtimer t;
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ ktime_t napi_busy_poll_dt;
+ bool napi_prefer_busy_poll;
+#endif
+};
+
+static inline bool io_should_wake(struct io_wait_queue *iowq)
+{
+ struct io_ring_ctx *ctx = iowq->ctx;
+ int dist = READ_ONCE(ctx->rings->cq.tail) - (int) iowq->cq_tail;
+
+ /*
+ * Wake up if we have enough events, or if a timeout occurred since we
+ * started waiting. For timeouts, we always want to return to userspace,
+ * regardless of event count.
+ */
+ return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
+}
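
A small standalone illustration (not from this patch) of the signed-distance trick in io_should_wake() above: with 32-bit tail counters that are free to wrap, the unsigned subtraction interpreted as a signed int still yields the number of newly posted CQEs across the wrap point.

#include <assert.h>

void wake_distance_example(void)
{
	unsigned int cq_tail_target = 0xfffffffeu;	/* tail the waiter armed with */
	unsigned int ring_tail = 0x00000001u;		/* current ring tail, post-wrap */
	int dist = ring_tail - (int)cq_tail_target;

	/* three CQEs were posted across the wrap, so the waiter should wake */
	assert(dist == 3 && dist >= 0);
}
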
+
+#define IORING_MAX_ENTRIES 32768
+#define IORING_MAX_CQ_ENTRIES (2 * IORING_MAX_ENTRIES)
+
+int io_prepare_config(struct io_ctx_config *config);
+
+bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow, bool cqe32);
int io_run_task_work_sig(struct io_ring_ctx *ctx);
-int __io_run_local_work(struct io_ring_ctx *ctx, bool *locked);
-int io_run_local_work(struct io_ring_ctx *ctx);
+int io_run_local_work(struct io_ring_ctx *ctx, int min_events, int max_events);
void io_req_defer_failed(struct io_kiocb *req, s32 res);
-void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags);
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
-bool io_aux_cqe(struct io_ring_ctx *ctx, bool defer, u64 user_data, s32 res, u32 cflags,
- bool allow_overflow);
+void io_add_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
+bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags);
+bool io_req_post_cqe32(struct io_kiocb *req, struct io_uring_cqe src_cqe[2]);
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);
-struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages);
-
+unsigned io_linked_nr(struct io_kiocb *req);
+void io_req_track_inflight(struct io_kiocb *req);
struct file *io_file_get_normal(struct io_kiocb *req, int fd);
struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
unsigned issue_flags);
-static inline bool io_req_ffs_set(struct io_kiocb *req)
-{
- return req->flags & REQ_F_FIXED_FILE;
-}
-
-void __io_req_task_work_add(struct io_kiocb *req, bool allow_local);
-bool io_is_uring_fops(struct file *file);
-bool io_alloc_async_data(struct io_kiocb *req);
+void __io_req_task_work_add(struct io_kiocb *req, unsigned flags);
+void io_req_task_work_add_remote(struct io_kiocb *req, unsigned flags);
void io_req_task_queue(struct io_kiocb *req);
-void io_queue_iowq(struct io_kiocb *req, bool *dont_use);
-void io_req_task_complete(struct io_kiocb *req, bool *locked);
+void io_req_task_complete(struct io_tw_req tw_req, io_tw_token_t tw);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
-void io_req_task_submit(struct io_kiocb *req, bool *locked);
+void io_req_task_submit(struct io_tw_req tw_req, io_tw_token_t tw);
+struct llist_node *io_handle_tw_list(struct llist_node *node, unsigned int *count, unsigned int max_entries);
+struct llist_node *tctx_task_work_run(struct io_uring_task *tctx, unsigned int max_entries, unsigned int *count);
void tctx_task_work(struct callback_head *cb);
-__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
-int io_uring_alloc_task_context(struct task_struct *task,
- struct io_ring_ctx *ctx);
+__cold void io_uring_drop_tctx_refs(struct task_struct *task);
-int io_poll_issue(struct io_kiocb *req, bool *locked);
+int io_ring_add_registered_file(struct io_uring_task *tctx, struct file *file,
+ int start, int end);
+void io_req_queue_iowq(struct io_kiocb *req);
+
+int io_poll_issue(struct io_kiocb *req, io_tw_token_t tw);
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
-void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node);
-int io_req_prep_async(struct io_kiocb *req);
+__cold void io_iopoll_try_reap_events(struct io_ring_ctx *ctx);
+void __io_submit_flush_completions(struct io_ring_ctx *ctx);
struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
void io_wq_submit_work(struct io_wq_work *work);
void io_free_req(struct io_kiocb *req);
void io_queue_next(struct io_kiocb *req);
-void __io_put_task(struct task_struct *task, int nr);
void io_task_refs_refill(struct io_uring_task *tctx);
bool __io_alloc_req_refill(struct io_ring_ctx *ctx);
-bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
- bool cancel_all);
-
-#define io_lockdep_assert_cq_locked(ctx) \
- do { \
- if (ctx->flags & IORING_SETUP_IOPOLL) { \
- lockdep_assert_held(&ctx->uring_lock); \
- } else if (!ctx->task_complete) { \
- lockdep_assert_held(&ctx->completion_lock); \
- } else if (ctx->submitter_task->flags & PF_EXITING) { \
- lockdep_assert(current_work()); \
- } else { \
- lockdep_assert(current == ctx->submitter_task); \
- } \
- } while (0)
+void io_activate_pollwq(struct io_ring_ctx *ctx);
+
+static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
+{
+#if defined(CONFIG_PROVE_LOCKING)
+ lockdep_assert(in_task());
+
+ if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
+ lockdep_assert_held(&ctx->uring_lock);
+
+ if (ctx->flags & IORING_SETUP_IOPOLL) {
+ lockdep_assert_held(&ctx->uring_lock);
+ } else if (!ctx->task_complete) {
+ lockdep_assert_held(&ctx->completion_lock);
+ } else if (ctx->submitter_task) {
+ /*
+ * ->submitter_task may be NULL and we can still post a CQE,
+		 * if the ring has been set up with IORING_SETUP_R_DISABLED.
+ * Not from an SQE, as those cannot be submitted, but via
+ * updating tagged resources.
+ */
+ if (!percpu_ref_is_dying(&ctx->refs))
+ lockdep_assert(current == ctx->submitter_task);
+ }
+#endif
+}
+
+static inline bool io_is_compat(struct io_ring_ctx *ctx)
+{
+ return IS_ENABLED(CONFIG_COMPAT) && unlikely(ctx->compat);
+}
static inline void io_req_task_work_add(struct io_kiocb *req)
{
- __io_req_task_work_add(req, true);
+ __io_req_task_work_add(req, 0);
+}
+
+static inline void io_submit_flush_completions(struct io_ring_ctx *ctx)
+{
+ if (!wq_list_empty(&ctx->submit_state.compl_reqs) ||
+ ctx->submit_state.cq_flush)
+ __io_submit_flush_completions(ctx);
}
#define io_for_each_link(pos, head) \
for (pos = (head); pos; pos = pos->link)
-void io_cq_unlock_post(struct io_ring_ctx *ctx);
-
-static inline struct io_uring_cqe *io_get_cqe_overflow(struct io_ring_ctx *ctx,
- bool overflow)
+static inline bool io_get_cqe_overflow(struct io_ring_ctx *ctx,
+ struct io_uring_cqe **ret,
+ bool overflow, bool cqe32)
{
io_lockdep_assert_cq_locked(ctx);
- if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
- struct io_uring_cqe *cqe = ctx->cqe_cached;
-
- ctx->cached_cq_tail++;
+ if (unlikely(ctx->cqe_sentinel - ctx->cqe_cached < (cqe32 + 1))) {
+ if (unlikely(!io_cqe_cache_refill(ctx, overflow, cqe32)))
+ return false;
+ }
+ *ret = ctx->cqe_cached;
+ ctx->cached_cq_tail++;
+ ctx->cqe_cached++;
+ if (ctx->flags & IORING_SETUP_CQE32) {
ctx->cqe_cached++;
- if (ctx->flags & IORING_SETUP_CQE32)
- ctx->cqe_cached++;
- return cqe;
+ } else if (cqe32 && ctx->flags & IORING_SETUP_CQE_MIXED) {
+ ctx->cqe_cached++;
+ ctx->cached_cq_tail++;
}
+ WARN_ON_ONCE(ctx->cqe_cached > ctx->cqe_sentinel);
+ return true;
+}
- return __io_get_cqe(ctx, overflow);
+static inline bool io_get_cqe(struct io_ring_ctx *ctx, struct io_uring_cqe **ret,
+ bool cqe32)
+{
+ return io_get_cqe_overflow(ctx, ret, false, cqe32);
}
-static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
+static inline bool io_defer_get_uncommited_cqe(struct io_ring_ctx *ctx,
+ struct io_uring_cqe **cqe_ret)
{
- return io_get_cqe_overflow(ctx, false);
+ io_lockdep_assert_cq_locked(ctx);
+
+ ctx->submit_state.cq_flush = true;
+ return io_get_cqe(ctx, cqe_ret, ctx->flags & IORING_SETUP_CQE_MIXED);
}
-static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
- struct io_kiocb *req)
+static __always_inline bool io_fill_cqe_req(struct io_ring_ctx *ctx,
+ struct io_kiocb *req)
{
+ bool is_cqe32 = req->cqe.flags & IORING_CQE_F_32;
struct io_uring_cqe *cqe;
/*
- * If we can't get a cq entry, userspace overflowed the
- * submission (by quite a lot). Increment the overflow count in
- * the ring.
+ * If we can't get a cq entry, userspace overflowed the submission
+ * (by quite a lot).
*/
- cqe = io_get_cqe(ctx);
- if (unlikely(!cqe))
+ if (unlikely(!io_get_cqe(ctx, &cqe, is_cqe32)))
return false;
- trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
- req->cqe.res, req->cqe.flags,
- (req->flags & REQ_F_CQE32_INIT) ? req->extra1 : 0,
- (req->flags & REQ_F_CQE32_INIT) ? req->extra2 : 0);
-
memcpy(cqe, &req->cqe, sizeof(*cqe));
-
- if (ctx->flags & IORING_SETUP_CQE32) {
- u64 extra1 = 0, extra2 = 0;
-
- if (req->flags & REQ_F_CQE32_INIT) {
- extra1 = req->extra1;
- extra2 = req->extra2;
- }
-
- WRITE_ONCE(cqe->big_cqe[0], extra1);
- WRITE_ONCE(cqe->big_cqe[1], extra2);
+ if (ctx->flags & IORING_SETUP_CQE32 || is_cqe32) {
+ memcpy(cqe->big_cqe, &req->big_cqe, sizeof(*cqe));
+ memset(&req->big_cqe, 0, sizeof(req->big_cqe));
}
- return true;
-}
-static inline bool io_fill_cqe_req(struct io_ring_ctx *ctx,
- struct io_kiocb *req)
-{
- if (likely(__io_fill_cqe_req(ctx, req)))
- return true;
- return io_req_cqe_overflow(req);
+ if (trace_io_uring_complete_enabled())
+ trace_io_uring_complete(req->ctx, req, cqe);
+ return true;
}
static inline void req_set_fail(struct io_kiocb *req)
@@ -183,22 +319,67 @@ static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
req->cqe.flags = cflags;
}
+static inline u32 ctx_cqe32_flags(struct io_ring_ctx *ctx)
+{
+ if (ctx->flags & IORING_SETUP_CQE_MIXED)
+ return IORING_CQE_F_32;
+ return 0;
+}
+
+static inline void io_req_set_res32(struct io_kiocb *req, s32 res, u32 cflags,
+ __u64 extra1, __u64 extra2)
+{
+ req->cqe.res = res;
+ req->cqe.flags = cflags | ctx_cqe32_flags(req->ctx);
+ req->big_cqe.extra1 = extra1;
+ req->big_cqe.extra2 = extra2;
+}
+
+static inline void *io_uring_alloc_async_data(struct io_alloc_cache *cache,
+ struct io_kiocb *req)
+{
+ if (cache) {
+ req->async_data = io_cache_alloc(cache, GFP_KERNEL);
+ } else {
+ const struct io_issue_def *def = &io_issue_defs[req->opcode];
+
+ WARN_ON_ONCE(!def->async_size);
+ req->async_data = kmalloc(def->async_size, GFP_KERNEL);
+ }
+ if (req->async_data)
+ req->flags |= REQ_F_ASYNC_DATA;
+ return req->async_data;
+}
+
static inline bool req_has_async_data(struct io_kiocb *req)
{
return req->flags & REQ_F_ASYNC_DATA;
}
-static inline void io_put_file(struct file *file)
+static inline void io_req_async_data_clear(struct io_kiocb *req,
+ io_req_flags_t extra_flags)
+{
+ req->flags &= ~(REQ_F_ASYNC_DATA|extra_flags);
+ req->async_data = NULL;
+}
+
+static inline void io_req_async_data_free(struct io_kiocb *req)
{
- if (file)
- fput(file);
+ kfree(req->async_data);
+ io_req_async_data_clear(req, 0);
+}
+
+static inline void io_put_file(struct io_kiocb *req)
+{
+ if (!(req->flags & REQ_F_FIXED_FILE) && req->file)
+ fput(req->file);
}
static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
unsigned issue_flags)
{
lockdep_assert_held(&ctx->uring_lock);
- if (issue_flags & IO_URING_F_UNLOCKED)
+ if (unlikely(issue_flags & IO_URING_F_UNLOCKED))
mutex_unlock(&ctx->uring_lock);
}
@@ -211,7 +392,7 @@ static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
* The only exception is when we've detached the request and issue it
* from an async worker thread, grab the lock for that case.
*/
- if (issue_flags & IO_URING_F_UNLOCKED)
+ if (unlikely(issue_flags & IO_URING_F_UNLOCKED))
mutex_lock(&ctx->uring_lock);
lockdep_assert_held(&ctx->uring_lock);
}
@@ -222,47 +403,63 @@ static inline void io_commit_cqring(struct io_ring_ctx *ctx)
smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}
-/* requires smb_mb() prior, see wq_has_sleeper() */
-static inline void __io_cqring_wake(struct io_ring_ctx *ctx)
+static inline void __io_wq_wake(struct wait_queue_head *wq)
{
/*
- * Trigger waitqueue handler on all waiters on our waitqueue. This
- * won't necessarily wake up all the tasks, io_should_wake() will make
- * that decision.
*
* Pass in EPOLLIN|EPOLL_URING_WAKE as the poll wakeup key. The latter
* set in the mask so that if we recurse back into our own poll
* waitqueue handlers, we know we have a dependency between eventfd or
* epoll and should terminate multishot poll at that point.
*/
- if (waitqueue_active(&ctx->cq_wait))
- __wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
- poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
+ if (wq_has_sleeper(wq))
+ __wake_up(wq, TASK_NORMAL, 0, poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
+}
+
+static inline void io_poll_wq_wake(struct io_ring_ctx *ctx)
+{
+ __io_wq_wake(&ctx->poll_wq);
}
static inline void io_cqring_wake(struct io_ring_ctx *ctx)
{
- smp_mb();
- __io_cqring_wake(ctx);
+ /*
+ * Trigger waitqueue handler on all waiters on our waitqueue. This
+ * won't necessarily wake up all the tasks, io_should_wake() will make
+ * that decision.
+ */
+
+ __io_wq_wake(&ctx->cq_wait);
}
static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
struct io_rings *r = ctx->rings;
- return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
+ /*
+ * SQPOLL must use the actual sqring head, as using the cached_sq_head
+ * is race prone if the SQPOLL thread has grabbed entries but not yet
+ * committed them to the ring. For !SQPOLL, this doesn't matter, but
+ * since this helper is just used for SQPOLL sqring waits (or POLLOUT),
+ * just read the actual sqring head unconditionally.
+ */
+ return READ_ONCE(r->sq.tail) - READ_ONCE(r->sq.head) == ctx->sq_entries;
}
static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
struct io_rings *rings = ctx->rings;
+ unsigned int entries;
/* make sure SQ entry isn't read before tail */
- return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
+ entries = smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
+ return min(entries, ctx->sq_entries);
}
static inline int io_run_task_work(void)
{
+ bool ret = false;
+
/*
* Always check-and-clear the task_work notification signal. With how
* signaling works for task_work, we can find it set with nothing to
@@ -270,62 +467,46 @@ static inline int io_run_task_work(void)
*/
if (test_thread_flag(TIF_NOTIFY_SIGNAL))
clear_notify_signal();
+ /*
+ * PF_IO_WORKER never returns to userspace, so check here if we have
+ * notify work that needs processing.
+ */
+ if (current->flags & PF_IO_WORKER) {
+ if (test_thread_flag(TIF_NOTIFY_RESUME)) {
+ __set_current_state(TASK_RUNNING);
+ resume_user_mode_work(NULL);
+ }
+ if (current->io_uring) {
+ unsigned int count = 0;
+
+ __set_current_state(TASK_RUNNING);
+ tctx_task_work_run(current->io_uring, UINT_MAX, &count);
+ if (count)
+ ret = true;
+ }
+ }
if (task_work_pending(current)) {
__set_current_state(TASK_RUNNING);
task_work_run();
- return 1;
+ ret = true;
}
- return 0;
+ return ret;
}
-static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
+static inline bool io_local_work_pending(struct io_ring_ctx *ctx)
{
- return task_work_pending(current) || !wq_list_empty(&ctx->work_llist);
+ return !llist_empty(&ctx->work_llist) || !llist_empty(&ctx->retry_llist);
}
-static inline int io_run_task_work_ctx(struct io_ring_ctx *ctx)
-{
- int ret = 0;
- int ret2;
-
- if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
- ret = io_run_local_work(ctx);
-
- /* want to run this after in case more is added */
- ret2 = io_run_task_work();
-
- /* Try propagate error in favour of if tasks were run,
- * but still make sure to run them if requested
- */
- if (ret >= 0)
- ret += ret2;
-
- return ret;
-}
-
-static inline int io_run_local_work_locked(struct io_ring_ctx *ctx)
+static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
{
- bool locked;
- int ret;
-
- if (llist_empty(&ctx->work_llist))
- return 0;
-
- locked = true;
- ret = __io_run_local_work(ctx, &locked);
- /* shouldn't happen! */
- if (WARN_ON_ONCE(!locked))
- mutex_lock(&ctx->uring_lock);
- return ret;
+ return task_work_pending(current) || io_local_work_pending(ctx);
}
-static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
+static inline void io_tw_lock(struct io_ring_ctx *ctx, io_tw_token_t tw)
{
- if (!*locked) {
- mutex_lock(&ctx->uring_lock);
- *locked = true;
- }
+ lockdep_assert_held(&ctx->uring_lock);
}
/*
@@ -345,19 +526,11 @@ static inline void io_req_complete_defer(struct io_kiocb *req)
static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
- if (unlikely(ctx->off_timeout_used || ctx->drain_active || ctx->has_evfd))
+ if (unlikely(ctx->off_timeout_used ||
+ ctx->has_evfd || ctx->poll_activated))
__io_commit_cqring_flush(ctx);
}
-/* must to be called somewhat shortly after putting a request */
-static inline void io_put_task(struct task_struct *task, int nr)
-{
- if (likely(task == current))
- task->io_uring->cached_refs += nr;
- else
- __io_put_task(task, nr);
-}
-
static inline void io_get_task_refs(int nr)
{
struct io_uring_task *tctx = current->io_uring;
@@ -372,19 +545,30 @@ static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
return !ctx->submit_state.free_list.next;
}
-static inline bool io_alloc_req_refill(struct io_ring_ctx *ctx)
+extern struct kmem_cache *req_cachep;
+
+static inline struct io_kiocb *io_extract_req(struct io_ring_ctx *ctx)
{
- if (unlikely(io_req_cache_empty(ctx)))
- return __io_alloc_req_refill(ctx);
- return true;
+ struct io_kiocb *req;
+
+ req = container_of(ctx->submit_state.free_list.next, struct io_kiocb, comp_list);
+ wq_stack_extract(&ctx->submit_state.free_list);
+ return req;
}
-static inline struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
+static inline bool io_alloc_req(struct io_ring_ctx *ctx, struct io_kiocb **req)
{
- struct io_wq_work_node *node;
+ if (unlikely(io_req_cache_empty(ctx))) {
+ if (!__io_alloc_req_refill(ctx))
+ return false;
+ }
+ *req = io_extract_req(ctx);
+ return true;
+}
- node = wq_stack_extract(&ctx->submit_state.free_list);
- return container_of(node, struct io_kiocb, comp_list);
+static inline bool io_allowed_defer_tw_run(struct io_ring_ctx *ctx)
+{
+ return likely(ctx->submitter_task == current);
}
static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
@@ -400,4 +584,33 @@ static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
io_req_task_work_add(req);
}
+static inline bool io_file_can_poll(struct io_kiocb *req)
+{
+ if (req->flags & REQ_F_CAN_POLL)
+ return true;
+ if (req->file && file_can_poll(req->file)) {
+ req->flags |= REQ_F_CAN_POLL;
+ return true;
+ }
+ return false;
+}
+
+static inline ktime_t io_get_time(struct io_ring_ctx *ctx)
+{
+ if (ctx->clockid == CLOCK_MONOTONIC)
+ return ktime_get();
+
+ return ktime_get_with_offset(ctx->clock_offset);
+}
+
+enum {
+ IO_CHECK_CQ_OVERFLOW_BIT,
+ IO_CHECK_CQ_DROPPED_BIT,
+};
+
+static inline bool io_has_work(struct io_ring_ctx *ctx)
+{
+ return test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq) ||
+ io_local_work_pending(ctx);
+}
#endif
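
The io_get_cqe_overflow() rework above reserves two 16-byte CQE slots for a 32-byte completion on an IORING_SETUP_CQE_MIXED ring and marks it with IORING_CQE_F_32. A minimal userspace sketch of how a completion reaper would walk such a ring, assuming a uapi header that already carries these new definitions; the function and its ring-field parameters are illustrative stand-ins, not part of this patch.

#include <linux/io_uring.h>	/* struct io_uring_cqe, IORING_CQE_F_32 */

/*
 * Illustrative only: drain a CQE_MIXED completion ring, stepping two
 * 16-byte slots whenever IORING_CQE_F_32 marks a big CQE.
 */
static void drain_mixed_cq(struct io_uring_cqe *cqes, unsigned mask,
			   unsigned *khead, unsigned *ktail,
			   void (*handle)(struct io_uring_cqe *))
{
	unsigned head = *khead;
	unsigned tail = __atomic_load_n(ktail, __ATOMIC_ACQUIRE);

	while (head != tail) {
		struct io_uring_cqe *cqe = &cqes[head & mask];

		handle(cqe);
		head += (cqe->flags & IORING_CQE_F_32) ? 2 : 1;
	}
	__atomic_store_n(khead, head, __ATOMIC_RELEASE);
}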
diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
index 4a6401080c1f..796d131107dd 100644
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -7,6 +7,7 @@
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/poll.h>
+#include <linux/vmalloc.h>
#include <linux/io_uring.h>
#include <uapi/linux/io_uring.h>
@@ -14,25 +15,66 @@
#include "io_uring.h"
#include "opdef.h"
#include "kbuf.h"
+#include "memmap.h"
-#define IO_BUFFER_LIST_BUF_PER_PAGE (PAGE_SIZE / sizeof(struct io_uring_buf))
+/* BIDs are addressed by a 16-bit field in a CQE */
+#define MAX_BIDS_PER_BGID (1 << 16)
-#define BGID_ARRAY 64
+/* Mapped buffer ring, return io_uring_buf from head */
+#define io_ring_head_to_buf(br, head, mask) &(br)->bufs[(head) & (mask)]
struct io_provide_buf {
struct file *file;
__u64 addr;
__u32 len;
__u32 bgid;
- __u16 nbufs;
+ __u32 nbufs;
__u16 bid;
};
+static bool io_kbuf_inc_commit(struct io_buffer_list *bl, int len)
+{
+ while (len) {
+ struct io_uring_buf *buf;
+ u32 buf_len, this_len;
+
+ buf = io_ring_head_to_buf(bl->buf_ring, bl->head, bl->mask);
+ buf_len = READ_ONCE(buf->len);
+ this_len = min_t(u32, len, buf_len);
+ buf_len -= this_len;
+ /* Stop looping for invalid buffer length of 0 */
+ if (buf_len || !this_len) {
+ WRITE_ONCE(buf->addr, READ_ONCE(buf->addr) + this_len);
+ WRITE_ONCE(buf->len, buf_len);
+ return false;
+ }
+ WRITE_ONCE(buf->len, 0);
+ bl->head++;
+ len -= this_len;
+ }
+ return true;
+}
+
+bool io_kbuf_commit(struct io_kiocb *req,
+ struct io_buffer_list *bl, int len, int nr)
+{
+ if (unlikely(!(req->flags & REQ_F_BUFFERS_COMMIT)))
+ return true;
+
+ req->flags &= ~REQ_F_BUFFERS_COMMIT;
+
+ if (unlikely(len < 0))
+ return true;
+ if (bl->flags & IOBL_INC)
+ return io_kbuf_inc_commit(bl, len);
+ bl->head += nr;
+ return true;
+}
+
static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
unsigned int bgid)
{
- if (ctx->io_bl && bgid < BGID_ARRAY)
- return &ctx->io_bl[bgid];
+ lockdep_assert_held(&ctx->uring_lock);
return xa_load(&ctx->io_bl_xa, bgid);
}
@@ -40,71 +82,41 @@ static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
static int io_buffer_add_list(struct io_ring_ctx *ctx,
struct io_buffer_list *bl, unsigned int bgid)
{
+ /*
+ * Store buffer group ID and finally mark the list as visible.
+ * The normal lookup doesn't care about the visibility as we're
+ * always under the ->uring_lock, but lookups from mmap do.
+ */
bl->bgid = bgid;
- if (bgid < BGID_ARRAY)
- return 0;
-
+ guard(mutex)(&ctx->mmap_lock);
return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
}
-void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
+void io_kbuf_drop_legacy(struct io_kiocb *req)
+{
+ if (WARN_ON_ONCE(!(req->flags & REQ_F_BUFFER_SELECTED)))
+ return;
+ req->flags &= ~REQ_F_BUFFER_SELECTED;
+ kfree(req->kbuf);
+ req->kbuf = NULL;
+}
+
+bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
{
struct io_ring_ctx *ctx = req->ctx;
struct io_buffer_list *bl;
struct io_buffer *buf;
- /*
- * For legacy provided buffer mode, don't recycle if we already did
- * IO to this buffer. For ring-mapped provided buffer mode, we should
- * increment ring->head to explicitly monopolize the buffer to avoid
- * multiple use.
- */
- if (req->flags & REQ_F_PARTIAL_IO)
- return;
-
io_ring_submit_lock(ctx, issue_flags);
buf = req->kbuf;
bl = io_buffer_get_list(ctx, buf->bgid);
list_add(&buf->list, &bl->buf_list);
+ bl->nbufs++;
req->flags &= ~REQ_F_BUFFER_SELECTED;
- req->buf_index = buf->bgid;
io_ring_submit_unlock(ctx, issue_flags);
- return;
-}
-
-unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
-{
- unsigned int cflags;
-
- /*
- * We can add this buffer back to two lists:
- *
- * 1) The io_buffers_cache list. This one is protected by the
- * ctx->uring_lock. If we already hold this lock, add back to this
- * list as we can grab it from issue as well.
- * 2) The io_buffers_comp list. This one is protected by the
- * ctx->completion_lock.
- *
- * We migrate buffers from the comp_list to the issue cache list
- * when we need one.
- */
- if (req->flags & REQ_F_BUFFER_RING) {
- /* no buffers to recycle for this case */
- cflags = __io_put_kbuf_list(req, NULL);
- } else if (issue_flags & IO_URING_F_UNLOCKED) {
- struct io_ring_ctx *ctx = req->ctx;
-
- spin_lock(&ctx->completion_lock);
- cflags = __io_put_kbuf_list(req, &ctx->io_buffers_comp);
- spin_unlock(&ctx->completion_lock);
- } else {
- lockdep_assert_held(&req->ctx->uring_lock);
-
- cflags = __io_put_kbuf_list(req, &req->ctx->io_buffers_cache);
- }
- return cflags;
+ return true;
}
static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len,
@@ -115,8 +127,11 @@ static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len,
kbuf = list_first_entry(&bl->buf_list, struct io_buffer, list);
list_del(&kbuf->list);
+ bl->nbufs--;
if (*len == 0 || *len > kbuf->len)
*len = kbuf->len;
+ if (list_empty(&bl->buf_list))
+ req->flags |= REQ_F_BL_EMPTY;
req->flags |= REQ_F_BUFFER_SELECTED;
req->kbuf = kbuf;
req->buf_index = kbuf->bid;
@@ -125,149 +140,324 @@ static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len,
return NULL;
}
-static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
- struct io_buffer_list *bl,
- unsigned int issue_flags)
+static int io_provided_buffers_select(struct io_kiocb *req, size_t *len,
+ struct io_buffer_list *bl,
+ struct iovec *iov)
+{
+ void __user *buf;
+
+ buf = io_provided_buffer_select(req, len, bl);
+ if (unlikely(!buf))
+ return -ENOBUFS;
+
+ iov[0].iov_base = buf;
+ iov[0].iov_len = *len;
+ return 1;
+}
+
+static bool io_should_commit(struct io_kiocb *req, unsigned int issue_flags)
+{
+ /*
+ * If we came in unlocked, we have no choice but to consume the
+ * buffer here, otherwise nothing ensures that the buffer won't
+ * get used by others. This does mean it'll be pinned until the
+ * IO completes, coming in unlocked means we're being called from
+ * io-wq context and there may be further retries in async hybrid
+ * mode. For the locked case, the caller must call commit when
+ * the transfer completes (or if we get -EAGAIN and must poll of
+	 * the transfer completes (or if we get -EAGAIN and must poll or
+	 * retry).
+ if (issue_flags & IO_URING_F_UNLOCKED)
+ return true;
+
+ /* uring_cmd commits kbuf upfront, no need to auto-commit */
+ if (!io_file_can_poll(req) && req->opcode != IORING_OP_URING_CMD)
+ return true;
+ return false;
+}
+
+static struct io_br_sel io_ring_buffer_select(struct io_kiocb *req, size_t *len,
+ struct io_buffer_list *bl,
+ unsigned int issue_flags)
{
struct io_uring_buf_ring *br = bl->buf_ring;
+ __u16 tail, head = bl->head;
+ struct io_br_sel sel = { };
struct io_uring_buf *buf;
- __u16 head = bl->head;
+ u32 buf_len;
+
+ tail = smp_load_acquire(&br->tail);
+ if (unlikely(tail == head))
+ return sel;
+
+ if (head + 1 == tail)
+ req->flags |= REQ_F_BL_EMPTY;
+
+ buf = io_ring_head_to_buf(br, head, bl->mask);
+ buf_len = READ_ONCE(buf->len);
+ if (*len == 0 || *len > buf_len)
+ *len = buf_len;
+ req->flags |= REQ_F_BUFFER_RING | REQ_F_BUFFERS_COMMIT;
+ req->buf_index = READ_ONCE(buf->bid);
+ sel.buf_list = bl;
+ sel.addr = u64_to_user_ptr(READ_ONCE(buf->addr));
+
+ if (io_should_commit(req, issue_flags)) {
+ io_kbuf_commit(req, sel.buf_list, *len, 1);
+ sel.buf_list = NULL;
+ }
+ return sel;
+}
- if (unlikely(smp_load_acquire(&br->tail) == head))
- return NULL;
+struct io_br_sel io_buffer_select(struct io_kiocb *req, size_t *len,
+ unsigned buf_group, unsigned int issue_flags)
+{
+ struct io_ring_ctx *ctx = req->ctx;
+ struct io_br_sel sel = { };
+ struct io_buffer_list *bl;
- head &= bl->mask;
- if (head < IO_BUFFER_LIST_BUF_PER_PAGE) {
- buf = &br->bufs[head];
- } else {
- int off = head & (IO_BUFFER_LIST_BUF_PER_PAGE - 1);
- int index = head / IO_BUFFER_LIST_BUF_PER_PAGE;
- buf = page_address(bl->buf_pages[index]);
- buf += off;
+ io_ring_submit_lock(req->ctx, issue_flags);
+
+ bl = io_buffer_get_list(ctx, buf_group);
+ if (likely(bl)) {
+ if (bl->flags & IOBL_BUF_RING)
+ sel = io_ring_buffer_select(req, len, bl, issue_flags);
+ else
+ sel.addr = io_provided_buffer_select(req, len, bl);
+ }
+ io_ring_submit_unlock(req->ctx, issue_flags);
+ return sel;
+}
+
+/* cap it at a reasonable 256, will be one page even for 4K */
+#define PEEK_MAX_IMPORT 256
+
+static int io_ring_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg,
+ struct io_buffer_list *bl)
+{
+ struct io_uring_buf_ring *br = bl->buf_ring;
+ struct iovec *iov = arg->iovs;
+ int nr_iovs = arg->nr_iovs;
+ __u16 nr_avail, tail, head;
+ struct io_uring_buf *buf;
+
+ tail = smp_load_acquire(&br->tail);
+ head = bl->head;
+ nr_avail = min_t(__u16, tail - head, UIO_MAXIOV);
+ if (unlikely(!nr_avail))
+ return -ENOBUFS;
+
+ buf = io_ring_head_to_buf(br, head, bl->mask);
+ if (arg->max_len) {
+ u32 len = READ_ONCE(buf->len);
+ size_t needed;
+
+ if (unlikely(!len))
+ return -ENOBUFS;
+ needed = (arg->max_len + len - 1) / len;
+ needed = min_not_zero(needed, (size_t) PEEK_MAX_IMPORT);
+ if (nr_avail > needed)
+ nr_avail = needed;
+ }
+
+ /*
+ * only alloc a bigger array if we know we have data to map, eg not
+ * a speculative peek operation.
+ */
+ if (arg->mode & KBUF_MODE_EXPAND && nr_avail > nr_iovs && arg->max_len) {
+ iov = kmalloc_array(nr_avail, sizeof(struct iovec), GFP_KERNEL);
+ if (unlikely(!iov))
+ return -ENOMEM;
+ if (arg->mode & KBUF_MODE_FREE)
+ kfree(arg->iovs);
+ arg->iovs = iov;
+ nr_iovs = nr_avail;
+ } else if (nr_avail < nr_iovs) {
+ nr_iovs = nr_avail;
}
- if (*len == 0 || *len > buf->len)
- *len = buf->len;
+
+ /* set it to max, if not set, so we can use it unconditionally */
+ if (!arg->max_len)
+ arg->max_len = INT_MAX;
+
+ req->buf_index = READ_ONCE(buf->bid);
+ do {
+ u32 len = READ_ONCE(buf->len);
+
+ /* truncate end piece, if needed, for non partial buffers */
+ if (len > arg->max_len) {
+ len = arg->max_len;
+ if (!(bl->flags & IOBL_INC)) {
+ arg->partial_map = 1;
+ if (iov != arg->iovs)
+ break;
+ WRITE_ONCE(buf->len, len);
+ }
+ }
+
+ iov->iov_base = u64_to_user_ptr(READ_ONCE(buf->addr));
+ iov->iov_len = len;
+ iov++;
+
+ arg->out_len += len;
+ arg->max_len -= len;
+ if (!arg->max_len)
+ break;
+
+ buf = io_ring_head_to_buf(br, ++head, bl->mask);
+ } while (--nr_iovs);
+
+ if (head == tail)
+ req->flags |= REQ_F_BL_EMPTY;
+
req->flags |= REQ_F_BUFFER_RING;
- req->buf_list = bl;
- req->buf_index = buf->bid;
+ return iov - arg->iovs;
+}
+
+int io_buffers_select(struct io_kiocb *req, struct buf_sel_arg *arg,
+ struct io_br_sel *sel, unsigned int issue_flags)
+{
+ struct io_ring_ctx *ctx = req->ctx;
+ int ret = -ENOENT;
+
+ io_ring_submit_lock(ctx, issue_flags);
+ sel->buf_list = io_buffer_get_list(ctx, arg->buf_group);
+ if (unlikely(!sel->buf_list))
+ goto out_unlock;
- if (issue_flags & IO_URING_F_UNLOCKED || !file_can_poll(req->file)) {
+ if (sel->buf_list->flags & IOBL_BUF_RING) {
+ ret = io_ring_buffers_peek(req, arg, sel->buf_list);
/*
- * If we came in unlocked, we have no choice but to consume the
- * buffer here, otherwise nothing ensures that the buffer won't
- * get used by others. This does mean it'll be pinned until the
- * IO completes, coming in unlocked means we're being called from
- * io-wq context and there may be further retries in async hybrid
- * mode. For the locked case, the caller must call commit when
- * the transfer completes (or if we get -EAGAIN and must poll of
- * retry).
+ * Don't recycle these buffers if we need to go through poll.
+ * Nobody else can use them anyway, and holding on to provided
+ * buffers for a send/write operation would happen on the app
+ * side anyway with normal buffers. Besides, we already
+ * committed them, they cannot be put back in the queue.
*/
- req->buf_list = NULL;
- bl->head++;
+ if (ret > 0) {
+ req->flags |= REQ_F_BUFFERS_COMMIT | REQ_F_BL_NO_RECYCLE;
+ io_kbuf_commit(req, sel->buf_list, arg->out_len, ret);
+ }
+ } else {
+ ret = io_provided_buffers_select(req, &arg->out_len, sel->buf_list, arg->iovs);
+ }
+out_unlock:
+ if (issue_flags & IO_URING_F_UNLOCKED) {
+ sel->buf_list = NULL;
+ mutex_unlock(&ctx->uring_lock);
}
- return u64_to_user_ptr(buf->addr);
+ return ret;
}
-void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
- unsigned int issue_flags)
+int io_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg,
+ struct io_br_sel *sel)
{
struct io_ring_ctx *ctx = req->ctx;
struct io_buffer_list *bl;
- void __user *ret = NULL;
+ int ret;
- io_ring_submit_lock(req->ctx, issue_flags);
+ lockdep_assert_held(&ctx->uring_lock);
- bl = io_buffer_get_list(ctx, req->buf_index);
- if (likely(bl)) {
- if (bl->buf_nr_pages)
- ret = io_ring_buffer_select(req, len, bl, issue_flags);
- else
- ret = io_provided_buffer_select(req, len, bl);
+ bl = io_buffer_get_list(ctx, arg->buf_group);
+ if (unlikely(!bl))
+ return -ENOENT;
+
+ if (bl->flags & IOBL_BUF_RING) {
+ ret = io_ring_buffers_peek(req, arg, bl);
+ if (ret > 0)
+ req->flags |= REQ_F_BUFFERS_COMMIT;
+ sel->buf_list = bl;
+ return ret;
}
- io_ring_submit_unlock(req->ctx, issue_flags);
+
+ /* don't support multiple buffer selections for legacy */
+ sel->buf_list = NULL;
+ return io_provided_buffers_select(req, &arg->max_len, bl, arg->iovs);
+}
+
+static inline bool __io_put_kbuf_ring(struct io_kiocb *req,
+ struct io_buffer_list *bl, int len, int nr)
+{
+ bool ret = true;
+
+ if (bl)
+ ret = io_kbuf_commit(req, bl, len, nr);
+
+ req->flags &= ~REQ_F_BUFFER_RING;
return ret;
}
-static __cold int io_init_bl_list(struct io_ring_ctx *ctx)
+unsigned int __io_put_kbufs(struct io_kiocb *req, struct io_buffer_list *bl,
+ int len, int nbufs)
{
- int i;
+ unsigned int ret;
- ctx->io_bl = kcalloc(BGID_ARRAY, sizeof(struct io_buffer_list),
- GFP_KERNEL);
- if (!ctx->io_bl)
- return -ENOMEM;
+ ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);
- for (i = 0; i < BGID_ARRAY; i++) {
- INIT_LIST_HEAD(&ctx->io_bl[i].buf_list);
- ctx->io_bl[i].bgid = i;
+ if (unlikely(!(req->flags & REQ_F_BUFFER_RING))) {
+ io_kbuf_drop_legacy(req);
+ return ret;
}
- return 0;
+ if (!__io_put_kbuf_ring(req, bl, len, nbufs))
+ ret |= IORING_CQE_F_BUF_MORE;
+ return ret;
}
-static int __io_remove_buffers(struct io_ring_ctx *ctx,
- struct io_buffer_list *bl, unsigned nbufs)
+static int io_remove_buffers_legacy(struct io_ring_ctx *ctx,
+ struct io_buffer_list *bl,
+ unsigned long nbufs)
{
- unsigned i = 0;
-
- /* shouldn't happen */
- if (!nbufs)
- return 0;
-
- if (bl->buf_nr_pages) {
- int j;
-
- i = bl->buf_ring->tail - bl->head;
- for (j = 0; j < bl->buf_nr_pages; j++)
- unpin_user_page(bl->buf_pages[j]);
- kvfree(bl->buf_pages);
- bl->buf_pages = NULL;
- bl->buf_nr_pages = 0;
- /* make sure it's seen as empty */
- INIT_LIST_HEAD(&bl->buf_list);
- return i;
- }
+ unsigned long i = 0;
+ struct io_buffer *nxt;
- /* the head kbuf is the list itself */
- while (!list_empty(&bl->buf_list)) {
- struct io_buffer *nxt;
+ /* protects io_buffers_cache */
+ lockdep_assert_held(&ctx->uring_lock);
+ WARN_ON_ONCE(bl->flags & IOBL_BUF_RING);
+ for (i = 0; i < nbufs && !list_empty(&bl->buf_list); i++) {
nxt = list_first_entry(&bl->buf_list, struct io_buffer, list);
list_del(&nxt->list);
- if (++i == nbufs)
- return i;
+ bl->nbufs--;
+ kfree(nxt);
cond_resched();
}
- i++;
-
return i;
}
+static void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
+{
+ if (bl->flags & IOBL_BUF_RING)
+ io_free_region(ctx->user, &bl->region);
+ else
+ io_remove_buffers_legacy(ctx, bl, -1U);
+
+ kfree(bl);
+}
+
void io_destroy_buffers(struct io_ring_ctx *ctx)
{
struct io_buffer_list *bl;
- unsigned long index;
- int i;
- for (i = 0; i < BGID_ARRAY; i++) {
- if (!ctx->io_bl)
- break;
- __io_remove_buffers(ctx, &ctx->io_bl[i], -1U);
- }
+ while (1) {
+ unsigned long index = 0;
- xa_for_each(&ctx->io_bl_xa, index, bl) {
- xa_erase(&ctx->io_bl_xa, bl->bgid);
- __io_remove_buffers(ctx, bl, -1U);
- kfree(bl);
+ scoped_guard(mutex, &ctx->mmap_lock) {
+ bl = xa_find(&ctx->io_bl_xa, &index, ULONG_MAX, XA_PRESENT);
+ if (bl)
+ xa_erase(&ctx->io_bl_xa, bl->bgid);
+ }
+ if (!bl)
+ break;
+ io_put_bl(ctx, bl);
}
+}
- while (!list_empty(&ctx->io_buffers_pages)) {
- struct page *page;
-
- page = list_first_entry(&ctx->io_buffers_pages, struct page, lru);
- list_del_init(&page->lru);
- __free_page(page);
- }
+static void io_destroy_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
+{
+ scoped_guard(mutex, &ctx->mmap_lock)
+ WARN_ON_ONCE(xa_erase(&ctx->io_bl_xa, bl->bgid) != bl);
+ io_put_bl(ctx, bl);
}
int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
@@ -280,7 +470,7 @@ int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return -EINVAL;
tmp = READ_ONCE(sqe->fd);
- if (!tmp || tmp > USHRT_MAX)
+ if (!tmp || tmp > MAX_BIDS_PER_BGID)
return -EINVAL;
memset(p, 0, sizeof(*p));
@@ -289,30 +479,6 @@ int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return 0;
}
-int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
-{
- struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
- struct io_ring_ctx *ctx = req->ctx;
- struct io_buffer_list *bl;
- int ret = 0;
-
- io_ring_submit_lock(ctx, issue_flags);
-
- ret = -ENOENT;
- bl = io_buffer_get_list(ctx, p->bgid);
- if (bl) {
- ret = -EINVAL;
- /* can't use provide/remove buffers command on mapped buffers */
- if (!bl->buf_nr_pages)
- ret = __io_remove_buffers(ctx, bl, p->nbufs);
- }
- io_ring_submit_unlock(ctx, issue_flags);
- if (ret < 0)
- req_set_fail(req);
- io_req_set_res(req, ret, 0);
- return IOU_OK;
-}
-
int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
unsigned long size, tmp_check;
@@ -323,19 +489,19 @@ int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
return -EINVAL;
tmp = READ_ONCE(sqe->fd);
- if (!tmp || tmp > USHRT_MAX)
+ if (!tmp || tmp > MAX_BIDS_PER_BGID)
return -E2BIG;
p->nbufs = tmp;
p->addr = READ_ONCE(sqe->addr);
p->len = READ_ONCE(sqe->len);
+ if (!p->len)
+ return -EINVAL;
if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
&size))
return -EOVERFLOW;
if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
return -EOVERFLOW;
-
- size = (unsigned long)p->len * p->nbufs;
if (!access_ok(u64_to_user_ptr(p->addr), size))
return -EFAULT;
@@ -343,69 +509,35 @@ int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
tmp = READ_ONCE(sqe->off);
if (tmp > USHRT_MAX)
return -E2BIG;
- if (tmp + p->nbufs >= USHRT_MAX)
+ if (tmp + p->nbufs > MAX_BIDS_PER_BGID)
return -EINVAL;
p->bid = tmp;
return 0;
}
-static int io_refill_buffer_cache(struct io_ring_ctx *ctx)
-{
- struct io_buffer *buf;
- struct page *page;
- int bufs_in_page;
-
- /*
- * Completions that don't happen inline (eg not under uring_lock) will
- * add to ->io_buffers_comp. If we don't have any free buffers, check
- * the completion list and splice those entries first.
- */
- if (!list_empty_careful(&ctx->io_buffers_comp)) {
- spin_lock(&ctx->completion_lock);
- if (!list_empty(&ctx->io_buffers_comp)) {
- list_splice_init(&ctx->io_buffers_comp,
- &ctx->io_buffers_cache);
- spin_unlock(&ctx->completion_lock);
- return 0;
- }
- spin_unlock(&ctx->completion_lock);
- }
-
- /*
- * No free buffers and no completion entries either. Allocate a new
- * page worth of buffer entries and add those to our freelist.
- */
- page = alloc_page(GFP_KERNEL_ACCOUNT);
- if (!page)
- return -ENOMEM;
-
- list_add(&page->lru, &ctx->io_buffers_pages);
-
- buf = page_address(page);
- bufs_in_page = PAGE_SIZE / sizeof(*buf);
- while (bufs_in_page) {
- list_add_tail(&buf->list, &ctx->io_buffers_cache);
- buf++;
- bufs_in_page--;
- }
-
- return 0;
-}
-
static int io_add_buffers(struct io_ring_ctx *ctx, struct io_provide_buf *pbuf,
struct io_buffer_list *bl)
{
struct io_buffer *buf;
u64 addr = pbuf->addr;
- int i, bid = pbuf->bid;
+ int ret = -ENOMEM, i, bid = pbuf->bid;
for (i = 0; i < pbuf->nbufs; i++) {
- if (list_empty(&ctx->io_buffers_cache) &&
- io_refill_buffer_cache(ctx))
+ /*
+		 * Nonsensical to have more buffers in a list than a 16-bit
+		 * bid can address, as the application then has no way of knowing
+ * which duplicate bid refers to what buffer.
+ */
+ if (bl->nbufs == USHRT_MAX) {
+ ret = -EOVERFLOW;
break;
- buf = list_first_entry(&ctx->io_buffers_cache, struct io_buffer,
- list);
- list_move_tail(&buf->list, &bl->buf_list);
+ }
+ buf = kmalloc(sizeof(*buf), GFP_KERNEL_ACCOUNT);
+ if (!buf)
+ break;
+
+ list_add_tail(&buf->list, &bl->buf_list);
+ bl->nbufs++;
buf->addr = addr;
buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
buf->bid = bid;
@@ -415,111 +547,134 @@ static int io_add_buffers(struct io_ring_ctx *ctx, struct io_provide_buf *pbuf,
cond_resched();
}
- return i ? 0 : -ENOMEM;
+ return i ? 0 : ret;
}
-int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
+static int __io_manage_buffers_legacy(struct io_kiocb *req,
+ struct io_buffer_list *bl)
{
struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
- struct io_ring_ctx *ctx = req->ctx;
- struct io_buffer_list *bl;
- int ret = 0;
-
- io_ring_submit_lock(ctx, issue_flags);
+ int ret;
- if (unlikely(p->bgid < BGID_ARRAY && !ctx->io_bl)) {
- ret = io_init_bl_list(ctx);
- if (ret)
- goto err;
- }
-
- bl = io_buffer_get_list(ctx, p->bgid);
- if (unlikely(!bl)) {
+ if (!bl) {
+ if (req->opcode != IORING_OP_PROVIDE_BUFFERS)
+ return -ENOENT;
bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT);
- if (!bl) {
- ret = -ENOMEM;
- goto err;
- }
+ if (!bl)
+ return -ENOMEM;
+
INIT_LIST_HEAD(&bl->buf_list);
- ret = io_buffer_add_list(ctx, bl, p->bgid);
+ ret = io_buffer_add_list(req->ctx, bl, p->bgid);
if (ret) {
kfree(bl);
- goto err;
+ return ret;
}
}
- /* can't add buffers via this command for a mapped buffer ring */
- if (bl->buf_nr_pages) {
- ret = -EINVAL;
- goto err;
- }
+ /* can't use provide/remove buffers command on mapped buffers */
+ if (bl->flags & IOBL_BUF_RING)
+ return -EINVAL;
+ if (req->opcode == IORING_OP_PROVIDE_BUFFERS)
+ return io_add_buffers(req->ctx, p, bl);
+ return io_remove_buffers_legacy(req->ctx, bl, p->nbufs);
+}
+
+int io_manage_buffers_legacy(struct io_kiocb *req, unsigned int issue_flags)
+{
+ struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
+ struct io_ring_ctx *ctx = req->ctx;
+ struct io_buffer_list *bl;
+ int ret;
- ret = io_add_buffers(ctx, p, bl);
-err:
+ io_ring_submit_lock(ctx, issue_flags);
+ bl = io_buffer_get_list(ctx, p->bgid);
+ ret = __io_manage_buffers_legacy(req, bl);
io_ring_submit_unlock(ctx, issue_flags);
if (ret < 0)
req_set_fail(req);
io_req_set_res(req, ret, 0);
- return IOU_OK;
+ return IOU_COMPLETE;
}
int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
{
- struct io_uring_buf_ring *br;
struct io_uring_buf_reg reg;
- struct io_buffer_list *bl, *free_bl = NULL;
- struct page **pages;
- int nr_pages;
+ struct io_buffer_list *bl;
+ struct io_uring_region_desc rd;
+ struct io_uring_buf_ring *br;
+ unsigned long mmap_offset;
+ unsigned long ring_size;
+ int ret;
+
+ lockdep_assert_held(&ctx->uring_lock);
if (copy_from_user(&reg, arg, sizeof(reg)))
return -EFAULT;
-
- if (reg.pad || reg.resv[0] || reg.resv[1] || reg.resv[2])
+ if (!mem_is_zero(reg.resv, sizeof(reg.resv)))
return -EINVAL;
- if (!reg.ring_addr)
- return -EFAULT;
- if (reg.ring_addr & ~PAGE_MASK)
+ if (reg.flags & ~(IOU_PBUF_RING_MMAP | IOU_PBUF_RING_INC))
return -EINVAL;
if (!is_power_of_2(reg.ring_entries))
return -EINVAL;
-
/* cannot disambiguate full vs empty due to head/tail size */
if (reg.ring_entries >= 65536)
return -EINVAL;
- if (unlikely(reg.bgid < BGID_ARRAY && !ctx->io_bl)) {
- int ret = io_init_bl_list(ctx);
- if (ret)
- return ret;
- }
-
bl = io_buffer_get_list(ctx, reg.bgid);
if (bl) {
/* if mapped buffer ring OR classic exists, don't allow */
- if (bl->buf_nr_pages || !list_empty(&bl->buf_list))
+ if (bl->flags & IOBL_BUF_RING || !list_empty(&bl->buf_list))
return -EEXIST;
- } else {
- free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL);
- if (!bl)
- return -ENOMEM;
+ io_destroy_bl(ctx, bl);
+ }
+
+ bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT);
+ if (!bl)
+ return -ENOMEM;
+
+ mmap_offset = (unsigned long)reg.bgid << IORING_OFF_PBUF_SHIFT;
+ ring_size = flex_array_size(br, bufs, reg.ring_entries);
+
+ memset(&rd, 0, sizeof(rd));
+ rd.size = PAGE_ALIGN(ring_size);
+ if (!(reg.flags & IOU_PBUF_RING_MMAP)) {
+ rd.user_addr = reg.ring_addr;
+ rd.flags |= IORING_MEM_REGION_TYPE_USER;
}
+ ret = io_create_region(ctx, &bl->region, &rd, mmap_offset);
+ if (ret)
+ goto fail;
+ br = io_region_get_ptr(&bl->region);
- pages = io_pin_pages(reg.ring_addr,
- struct_size(br, bufs, reg.ring_entries),
- &nr_pages);
- if (IS_ERR(pages)) {
- kfree(free_bl);
- return PTR_ERR(pages);
+#ifdef SHM_COLOUR
+ /*
+ * On platforms that have specific aliasing requirements, SHM_COLOUR
+ * is set and we must guarantee that the kernel and user side align
+ * nicely. We cannot do that if IOU_PBUF_RING_MMAP isn't set and
+ * the application mmap's the provided ring buffer. Fail the request
+ * if we, by chance, don't end up with aligned addresses. The app
+ * should use IOU_PBUF_RING_MMAP instead, and liburing will handle
+ * this transparently.
+ */
+ if (!(reg.flags & IOU_PBUF_RING_MMAP) &&
+ ((reg.ring_addr | (unsigned long)br) & (SHM_COLOUR - 1))) {
+ ret = -EINVAL;
+ goto fail;
}
+#endif
- br = page_address(pages[0]);
- bl->buf_pages = pages;
- bl->buf_nr_pages = nr_pages;
bl->nr_entries = reg.ring_entries;
- bl->buf_ring = br;
bl->mask = reg.ring_entries - 1;
+ bl->flags |= IOBL_BUF_RING;
+ bl->buf_ring = br;
+ if (reg.flags & IOU_PBUF_RING_INC)
+ bl->flags |= IOBL_INC;
io_buffer_add_list(ctx, bl, reg.bgid);
return 0;
+fail:
+ io_free_region(ctx->user, &bl->region);
+ kfree(bl);
+ return ret;
}
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
@@ -527,21 +682,58 @@ int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
struct io_uring_buf_reg reg;
struct io_buffer_list *bl;
+ lockdep_assert_held(&ctx->uring_lock);
+
if (copy_from_user(&reg, arg, sizeof(reg)))
return -EFAULT;
- if (reg.pad || reg.resv[0] || reg.resv[1] || reg.resv[2])
+ if (!mem_is_zero(reg.resv, sizeof(reg.resv)) || reg.flags)
return -EINVAL;
bl = io_buffer_get_list(ctx, reg.bgid);
if (!bl)
return -ENOENT;
- if (!bl->buf_nr_pages)
+ if (!(bl->flags & IOBL_BUF_RING))
return -EINVAL;
- __io_remove_buffers(ctx, bl, -1U);
- if (bl->bgid >= BGID_ARRAY) {
+ scoped_guard(mutex, &ctx->mmap_lock)
xa_erase(&ctx->io_bl_xa, bl->bgid);
- kfree(bl);
- }
+
+ io_put_bl(ctx, bl);
return 0;
}
+
+int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg)
+{
+ struct io_uring_buf_status buf_status;
+ struct io_buffer_list *bl;
+
+ if (copy_from_user(&buf_status, arg, sizeof(buf_status)))
+ return -EFAULT;
+ if (!mem_is_zero(buf_status.resv, sizeof(buf_status.resv)))
+ return -EINVAL;
+
+ bl = io_buffer_get_list(ctx, buf_status.buf_group);
+ if (!bl)
+ return -ENOENT;
+ if (!(bl->flags & IOBL_BUF_RING))
+ return -EINVAL;
+
+ buf_status.head = bl->head;
+ if (copy_to_user(arg, &buf_status, sizeof(buf_status)))
+ return -EFAULT;
+
+ return 0;
+}
+
+struct io_mapped_region *io_pbuf_get_region(struct io_ring_ctx *ctx,
+ unsigned int bgid)
+{
+ struct io_buffer_list *bl;
+
+ lockdep_assert_held(&ctx->mmap_lock);
+
+ bl = xa_load(&ctx->io_bl_xa, bgid);
+ if (!bl || !(bl->flags & IOBL_BUF_RING))
+ return NULL;
+ return &bl->region;
+}
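
io_kbuf_inc_commit() above is what lets an IOBL_INC group consume a ring buffer piecemeal: a partial transfer advances addr/len in place, a full one retires the slot. A standalone sketch of the same arithmetic over the uapi structures, not taken from this patch:

#include <linux/io_uring.h>	/* struct io_uring_buf, io_uring_buf_ring */

/*
 * Sketch of incremental consumption: eat 'len' bytes starting at 'head'.
 * A partially used buffer keeps its slot with addr/len advanced; a fully
 * used one is retired and head moves on, mirroring io_kbuf_inc_commit().
 */
static unsigned short inc_consume(struct io_uring_buf_ring *br,
				  unsigned short head, unsigned mask,
				  unsigned len)
{
	while (len) {
		struct io_uring_buf *buf = &br->bufs[head & mask];
		unsigned this_len = len < buf->len ? len : buf->len;

		if (!this_len)		/* malformed zero-length buffer */
			break;
		buf->addr += this_len;
		buf->len -= this_len;
		len -= this_len;
		if (!buf->len)
			head++;		/* buffer fully consumed */
	}
	return head;
}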
diff --git a/io_uring/kbuf.h b/io_uring/kbuf.h
index c23e15d7d3ca..bf15e26520d3 100644
--- a/io_uring/kbuf.h
+++ b/io_uring/kbuf.h
@@ -3,26 +3,37 @@
#define IOU_KBUF_H
#include <uapi/linux/io_uring.h>
+#include <linux/io_uring_types.h>
+
+enum {
+ /* ring mapped provided buffers */
+ IOBL_BUF_RING = 1,
+ /* buffers are consumed incrementally rather than always fully */
+ IOBL_INC = 2,
+};
struct io_buffer_list {
/*
- * If ->buf_nr_pages is set, then buf_pages/buf_ring are used. If not,
- * then these are classic provided buffers and ->buf_list is used.
+ * If the IOBL_BUF_RING flag is set, then buf_ring is used. If not, then
+ * these are classic provided buffers and ->buf_list is used.
*/
union {
struct list_head buf_list;
- struct {
- struct page **buf_pages;
- struct io_uring_buf_ring *buf_ring;
- };
+ struct io_uring_buf_ring *buf_ring;
};
+ /* count of classic/legacy buffers in buffer list */
+ int nbufs;
+
__u16 bgid;
/* below is for ring provided buffers */
- __u16 buf_nr_pages;
__u16 nr_entries;
__u16 head;
__u16 mask;
+
+ __u16 flags;
+
+ struct io_mapped_region region;
};
struct io_buffer {
@@ -33,48 +44,58 @@ struct io_buffer {
__u16 bgid;
};
-void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
- unsigned int issue_flags);
+enum {
+ /* can alloc a bigger vec */
+ KBUF_MODE_EXPAND = 1,
+ /* if bigger vec allocated, free old one */
+ KBUF_MODE_FREE = 2,
+};
+
+struct buf_sel_arg {
+ struct iovec *iovs;
+ size_t out_len;
+ size_t max_len;
+ unsigned short nr_iovs;
+ unsigned short mode;
+ unsigned short buf_group;
+ unsigned short partial_map;
+};
+
+struct io_br_sel io_buffer_select(struct io_kiocb *req, size_t *len,
+ unsigned buf_group, unsigned int issue_flags);
+int io_buffers_select(struct io_kiocb *req, struct buf_sel_arg *arg,
+ struct io_br_sel *sel, unsigned int issue_flags);
+int io_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg,
+ struct io_br_sel *sel);
void io_destroy_buffers(struct io_ring_ctx *ctx);
int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
-int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags);
-
int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
-int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags);
+int io_manage_buffers_legacy(struct io_kiocb *req, unsigned int issue_flags);
int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
+int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg);
+
+bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);
+void io_kbuf_drop_legacy(struct io_kiocb *req);
-unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);
+unsigned int __io_put_kbufs(struct io_kiocb *req, struct io_buffer_list *bl,
+ int len, int nbufs);
+bool io_kbuf_commit(struct io_kiocb *req,
+ struct io_buffer_list *bl, int len, int nr);
-void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);
+struct io_mapped_region *io_pbuf_get_region(struct io_ring_ctx *ctx,
+ unsigned int bgid);
-static inline void io_kbuf_recycle_ring(struct io_kiocb *req)
+static inline bool io_kbuf_recycle_ring(struct io_kiocb *req,
+ struct io_buffer_list *bl)
{
- /*
- * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear
- * the flag and hence ensure that bl->head doesn't get incremented.
- * If the tail has already been incremented, hang on to it.
- * The exception is partial io, that case we should increment bl->head
- * to monopolize the buffer.
- */
- if (req->buf_list) {
- if (req->flags & REQ_F_PARTIAL_IO) {
- /*
- * If we end up here, then the io_uring_lock has
- * been kept held since we retrieved the buffer.
- * For the io-wq case, we already cleared
- * req->buf_list when the buffer was retrieved,
- * hence it cannot be set here for that case.
- */
- req->buf_list->head++;
- req->buf_list = NULL;
- } else {
- req->buf_index = req->buf_list->bgid;
- req->flags &= ~REQ_F_BUFFER_RING;
- }
+ if (bl) {
+ req->flags &= ~(REQ_F_BUFFER_RING|REQ_F_BUFFERS_COMMIT);
+ return true;
}
+ return false;
}
static inline bool io_do_buffer_select(struct io_kiocb *req)
@@ -84,49 +105,31 @@ static inline bool io_do_buffer_select(struct io_kiocb *req)
return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING));
}
-static inline void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
+static inline bool io_kbuf_recycle(struct io_kiocb *req, struct io_buffer_list *bl,
+ unsigned issue_flags)
{
- if (req->flags & REQ_F_BUFFER_SELECTED)
- io_kbuf_recycle_legacy(req, issue_flags);
+ if (req->flags & REQ_F_BL_NO_RECYCLE)
+ return false;
if (req->flags & REQ_F_BUFFER_RING)
- io_kbuf_recycle_ring(req);
-}
-
-static inline unsigned int __io_put_kbuf_list(struct io_kiocb *req,
- struct list_head *list)
-{
- unsigned int ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);
-
- if (req->flags & REQ_F_BUFFER_RING) {
- if (req->buf_list) {
- req->buf_index = req->buf_list->bgid;
- req->buf_list->head++;
- }
- req->flags &= ~REQ_F_BUFFER_RING;
- } else {
- req->buf_index = req->kbuf->bgid;
- list_add(&req->kbuf->list, list);
- req->flags &= ~REQ_F_BUFFER_SELECTED;
- }
-
- return ret;
+ return io_kbuf_recycle_ring(req, bl);
+ if (req->flags & REQ_F_BUFFER_SELECTED)
+ return io_kbuf_recycle_legacy(req, issue_flags);
+ return false;
}
-static inline unsigned int io_put_kbuf_comp(struct io_kiocb *req)
+static inline unsigned int io_put_kbuf(struct io_kiocb *req, int len,
+ struct io_buffer_list *bl)
{
- lockdep_assert_held(&req->ctx->completion_lock);
-
- if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
+ if (!(req->flags & (REQ_F_BUFFER_RING | REQ_F_BUFFER_SELECTED)))
return 0;
- return __io_put_kbuf_list(req, &req->ctx->io_buffers_comp);
+ return __io_put_kbufs(req, bl, len, 1);
}
-static inline unsigned int io_put_kbuf(struct io_kiocb *req,
- unsigned issue_flags)
+static inline unsigned int io_put_kbufs(struct io_kiocb *req, int len,
+ struct io_buffer_list *bl, int nbufs)
{
-
- if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
+ if (!(req->flags & (REQ_F_BUFFER_RING | REQ_F_BUFFER_SELECTED)))
return 0;
- return __io_put_kbuf(req, issue_flags);
+ return __io_put_kbufs(req, bl, len, nbufs);
}
#endif
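
Tying the declarations above together: io_buffer_select() now returns a struct io_br_sel carrying the user address and, when the commit is deferred, the buffer list, and the completion side feeds that list plus the transferred length into io_put_kbuf(). The handler below is purely a hypothetical sketch of that calling convention; io_demo_recv(), demo_transfer() and the zero buf_group are stand-ins, not part of this patch.

/* Hypothetical issue handler, sketching the select/transfer/put flow. */
static int io_demo_recv(struct io_kiocb *req, unsigned int issue_flags)
{
	unsigned buf_group = 0;		/* normally captured at prep time */
	struct io_br_sel sel;
	size_t len = 0;
	int ret;

	sel = io_buffer_select(req, &len, buf_group, issue_flags);
	if (!sel.addr)
		return -ENOBUFS;

	ret = demo_transfer(sel.addr, len);	/* placeholder for real I/O */
	if (ret < 0) {
		io_kbuf_recycle(req, sel.buf_list, issue_flags);
		return ret;
	}
	io_req_set_res(req, ret, io_put_kbuf(req, ret, sel.buf_list));
	return IOU_COMPLETE;
}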
diff --git a/io_uring/memmap.c b/io_uring/memmap.c
new file mode 100644
index 000000000000..18e574776ef6
--- /dev/null
+++ b/io_uring/memmap.c
@@ -0,0 +1,396 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/io_uring.h>
+#include <linux/io_uring_types.h>
+#include <asm/shmparam.h>
+
+#include "memmap.h"
+#include "kbuf.h"
+#include "rsrc.h"
+#include "zcrx.h"
+
+static bool io_mem_alloc_compound(struct page **pages, int nr_pages,
+ size_t size, gfp_t gfp)
+{
+ struct page *page;
+ int i, order;
+
+ order = get_order(size);
+ if (order > MAX_PAGE_ORDER)
+ return false;
+ else if (order)
+ gfp |= __GFP_COMP;
+
+ page = alloc_pages(gfp, order);
+ if (!page)
+ return false;
+
+ for (i = 0; i < nr_pages; i++)
+ pages[i] = page + i;
+
+ return true;
+}
+
+struct page **io_pin_pages(unsigned long uaddr, unsigned long len, int *npages)
+{
+ unsigned long start, end, nr_pages;
+ struct page **pages;
+ int ret;
+
+ if (check_add_overflow(uaddr, len, &end))
+ return ERR_PTR(-EOVERFLOW);
+ if (check_add_overflow(end, PAGE_SIZE - 1, &end))
+ return ERR_PTR(-EOVERFLOW);
+
+ end = end >> PAGE_SHIFT;
+ start = uaddr >> PAGE_SHIFT;
+ nr_pages = end - start;
+ if (WARN_ON_ONCE(!nr_pages))
+ return ERR_PTR(-EINVAL);
+ if (WARN_ON_ONCE(nr_pages > INT_MAX))
+ return ERR_PTR(-EOVERFLOW);
+
+ pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
+ if (!pages)
+ return ERR_PTR(-ENOMEM);
+
+ ret = pin_user_pages_fast(uaddr, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
+ pages);
+ /* success, mapped all pages */
+ if (ret == nr_pages) {
+ *npages = nr_pages;
+ return pages;
+ }
+
+ /* partial map, or didn't map anything */
+ if (ret >= 0) {
+ /* if we did partial map, release any pages we did get */
+ if (ret)
+ unpin_user_pages(pages, ret);
+ ret = -EFAULT;
+ }
+ kvfree(pages);
+ return ERR_PTR(ret);
+}
+
+enum {
+ /* memory was vmap'ed for the kernel, freeing the region vunmap's it */
+ IO_REGION_F_VMAP = 1,
+ /* memory is provided by user and pinned by the kernel */
+ IO_REGION_F_USER_PROVIDED = 2,
+ /* only the first page in the array is ref'ed */
+ IO_REGION_F_SINGLE_REF = 4,
+};
+
+void io_free_region(struct user_struct *user, struct io_mapped_region *mr)
+{
+ if (mr->pages) {
+ long nr_refs = mr->nr_pages;
+
+ if (mr->flags & IO_REGION_F_SINGLE_REF)
+ nr_refs = 1;
+
+ if (mr->flags & IO_REGION_F_USER_PROVIDED)
+ unpin_user_pages(mr->pages, nr_refs);
+ else
+ release_pages(mr->pages, nr_refs);
+
+ kvfree(mr->pages);
+ }
+ if ((mr->flags & IO_REGION_F_VMAP) && mr->ptr)
+ vunmap(mr->ptr);
+ if (mr->nr_pages && user)
+ __io_unaccount_mem(user, mr->nr_pages);
+
+ memset(mr, 0, sizeof(*mr));
+}
+
+static int io_region_init_ptr(struct io_mapped_region *mr)
+{
+ struct io_imu_folio_data ifd;
+ void *ptr;
+
+ if (io_check_coalesce_buffer(mr->pages, mr->nr_pages, &ifd)) {
+ if (ifd.nr_folios == 1 && !PageHighMem(mr->pages[0])) {
+ mr->ptr = page_address(mr->pages[0]);
+ return 0;
+ }
+ }
+ ptr = vmap(mr->pages, mr->nr_pages, VM_MAP, PAGE_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ mr->ptr = ptr;
+ mr->flags |= IO_REGION_F_VMAP;
+ return 0;
+}
+
+static int io_region_pin_pages(struct io_mapped_region *mr,
+ struct io_uring_region_desc *reg)
+{
+ size_t size = io_region_size(mr);
+ struct page **pages;
+ int nr_pages;
+
+ pages = io_pin_pages(reg->user_addr, size, &nr_pages);
+ if (IS_ERR(pages))
+ return PTR_ERR(pages);
+ if (WARN_ON_ONCE(nr_pages != mr->nr_pages))
+ return -EFAULT;
+
+ mr->pages = pages;
+ mr->flags |= IO_REGION_F_USER_PROVIDED;
+ return 0;
+}
+
+static int io_region_allocate_pages(struct io_mapped_region *mr,
+ struct io_uring_region_desc *reg,
+ unsigned long mmap_offset)
+{
+ gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN;
+ size_t size = io_region_size(mr);
+ unsigned long nr_allocated;
+ struct page **pages;
+
+ pages = kvmalloc_array(mr->nr_pages, sizeof(*pages), gfp);
+ if (!pages)
+ return -ENOMEM;
+
+ if (io_mem_alloc_compound(pages, mr->nr_pages, size, gfp)) {
+ mr->flags |= IO_REGION_F_SINGLE_REF;
+ goto done;
+ }
+
+ nr_allocated = alloc_pages_bulk_node(gfp, NUMA_NO_NODE,
+ mr->nr_pages, pages);
+ if (nr_allocated != mr->nr_pages) {
+ if (nr_allocated)
+ release_pages(pages, nr_allocated);
+ kvfree(pages);
+ return -ENOMEM;
+ }
+done:
+ reg->mmap_offset = mmap_offset;
+ mr->pages = pages;
+ return 0;
+}
+
+int io_create_region(struct io_ring_ctx *ctx, struct io_mapped_region *mr,
+ struct io_uring_region_desc *reg,
+ unsigned long mmap_offset)
+{
+ int nr_pages, ret;
+ u64 end;
+
+ if (WARN_ON_ONCE(mr->pages || mr->ptr || mr->nr_pages))
+ return -EFAULT;
+ if (memchr_inv(&reg->__resv, 0, sizeof(reg->__resv)))
+ return -EINVAL;
+ if (reg->flags & ~IORING_MEM_REGION_TYPE_USER)
+ return -EINVAL;
+ /* user_addr should be set IFF it's a user memory backed region */
+ if ((reg->flags & IORING_MEM_REGION_TYPE_USER) != !!reg->user_addr)
+ return -EFAULT;
+ if (!reg->size || reg->mmap_offset || reg->id)
+ return -EINVAL;
+ if ((reg->size >> PAGE_SHIFT) > INT_MAX)
+ return -E2BIG;
+ if ((reg->user_addr | reg->size) & ~PAGE_MASK)
+ return -EINVAL;
+ if (check_add_overflow(reg->user_addr, reg->size, &end))
+ return -EOVERFLOW;
+
+ nr_pages = reg->size >> PAGE_SHIFT;
+ if (ctx->user) {
+ ret = __io_account_mem(ctx->user, nr_pages);
+ if (ret)
+ return ret;
+ }
+ mr->nr_pages = nr_pages;
+
+ if (reg->flags & IORING_MEM_REGION_TYPE_USER)
+ ret = io_region_pin_pages(mr, reg);
+ else
+ ret = io_region_allocate_pages(mr, reg, mmap_offset);
+ if (ret)
+ goto out_free;
+
+ ret = io_region_init_ptr(mr);
+ if (ret)
+ goto out_free;
+ return 0;
+out_free:
+ io_free_region(ctx->user, mr);
+ return ret;
+}
+
+static struct io_mapped_region *io_mmap_get_region(struct io_ring_ctx *ctx,
+ loff_t pgoff)
+{
+ loff_t offset = pgoff << PAGE_SHIFT;
+ unsigned int id;
+
+ switch (offset & IORING_OFF_MMAP_MASK) {
+ case IORING_OFF_SQ_RING:
+ case IORING_OFF_CQ_RING:
+ return &ctx->ring_region;
+ case IORING_OFF_SQES:
+ return &ctx->sq_region;
+ case IORING_OFF_PBUF_RING:
+ id = (offset & ~IORING_OFF_MMAP_MASK) >> IORING_OFF_PBUF_SHIFT;
+ return io_pbuf_get_region(ctx, id);
+ case IORING_MAP_OFF_PARAM_REGION:
+ return &ctx->param_region;
+ case IORING_MAP_OFF_ZCRX_REGION:
+ id = (offset & ~IORING_OFF_MMAP_MASK) >> IORING_OFF_ZCRX_SHIFT;
+ return io_zcrx_get_region(ctx, id);
+ }
+ return NULL;
+}
+
+static void *io_region_validate_mmap(struct io_ring_ctx *ctx,
+ struct io_mapped_region *mr)
+{
+ lockdep_assert_held(&ctx->mmap_lock);
+
+ if (!io_region_is_set(mr))
+ return ERR_PTR(-EINVAL);
+ if (mr->flags & IO_REGION_F_USER_PROVIDED)
+ return ERR_PTR(-EINVAL);
+
+ return io_region_get_ptr(mr);
+}
+
+static void *io_uring_validate_mmap_request(struct file *file, loff_t pgoff,
+ size_t sz)
+{
+ struct io_ring_ctx *ctx = file->private_data;
+ struct io_mapped_region *region;
+
+ region = io_mmap_get_region(ctx, pgoff);
+ if (!region)
+ return ERR_PTR(-EINVAL);
+ return io_region_validate_mmap(ctx, region);
+}
+
+#ifdef CONFIG_MMU
+
+static int io_region_mmap(struct io_ring_ctx *ctx,
+ struct io_mapped_region *mr,
+ struct vm_area_struct *vma,
+ unsigned max_pages)
+{
+ unsigned long nr_pages = min(mr->nr_pages, max_pages);
+
+ vm_flags_set(vma, VM_DONTEXPAND);
+ return vm_insert_pages(vma, vma->vm_start, mr->pages, &nr_pages);
+}
+
+__cold int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct io_ring_ctx *ctx = file->private_data;
+ size_t sz = vma->vm_end - vma->vm_start;
+ long offset = vma->vm_pgoff << PAGE_SHIFT;
+ unsigned int page_limit = UINT_MAX;
+ struct io_mapped_region *region;
+ void *ptr;
+
+ guard(mutex)(&ctx->mmap_lock);
+
+ ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
+ if (IS_ERR(ptr))
+ return PTR_ERR(ptr);
+
+ switch (offset & IORING_OFF_MMAP_MASK) {
+ case IORING_OFF_SQ_RING:
+ case IORING_OFF_CQ_RING:
+ page_limit = (sz + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ break;
+ }
+
+ region = io_mmap_get_region(ctx, vma->vm_pgoff);
+ return io_region_mmap(ctx, region, vma, page_limit);
+}
+
+unsigned long io_uring_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags)
+{
+ struct io_ring_ctx *ctx = filp->private_data;
+ void *ptr;
+
+ /*
+	 * Do not allow mapping to a user-provided address, to avoid breaking
+	 * the aliasing rules. Userspace is not able to guess the offset address
+	 * of a kernel kmalloc()ed memory area.
+ */
+ if (addr)
+ return -EINVAL;
+
+ guard(mutex)(&ctx->mmap_lock);
+
+ ptr = io_uring_validate_mmap_request(filp, pgoff, len);
+ if (IS_ERR(ptr))
+ return -ENOMEM;
+
+ /*
+ * Some architectures have strong cache aliasing requirements.
+ * For such architectures we need a coherent mapping which aliases
+ * kernel memory *and* userspace memory. To achieve that:
+ * - use a NULL file pointer to reference physical memory, and
+ * - use the kernel virtual address of the shared io_uring context
+ * (instead of the userspace-provided address, which has to be 0UL
+ * anyway).
+ * - use the same pgoff which the get_unmapped_area() uses to
+ * calculate the page colouring.
+ * For architectures without such aliasing requirements, the
+ * architecture will return any suitable mapping because addr is 0.
+ */
+ filp = NULL;
+ flags |= MAP_SHARED;
+ pgoff = 0; /* has been translated to ptr above */
+#ifdef SHM_COLOUR
+ addr = (uintptr_t) ptr;
+ pgoff = addr >> PAGE_SHIFT;
+#else
+ addr = 0UL;
+#endif
+ return mm_get_unmapped_area(filp, addr, len, pgoff, flags);
+}
+
+#else /* !CONFIG_MMU */
+
+int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ return is_nommu_shared_mapping(vma->vm_flags) ? 0 : -EINVAL;
+}
+
+unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
+{
+ return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
+}
+
+unsigned long io_uring_get_unmapped_area(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags)
+{
+ struct io_ring_ctx *ctx = file->private_data;
+ void *ptr;
+
+ guard(mutex)(&ctx->mmap_lock);
+
+ ptr = io_uring_validate_mmap_request(file, pgoff, len);
+ if (IS_ERR(ptr))
+ return PTR_ERR(ptr);
+
+ return (unsigned long) ptr;
+}
+
+#endif /* !CONFIG_MMU */
diff --git a/io_uring/memmap.h b/io_uring/memmap.h
new file mode 100644
index 000000000000..a39d9e518905
--- /dev/null
+++ b/io_uring/memmap.h
@@ -0,0 +1,51 @@
+#ifndef IO_URING_MEMMAP_H
+#define IO_URING_MEMMAP_H
+
+#define IORING_MAP_OFF_PARAM_REGION 0x20000000ULL
+#define IORING_MAP_OFF_ZCRX_REGION 0x30000000ULL
+
+#define IORING_OFF_ZCRX_SHIFT 16
+
+struct page **io_pin_pages(unsigned long uaddr, unsigned long len, int *npages);
+
+#ifndef CONFIG_MMU
+unsigned int io_uring_nommu_mmap_capabilities(struct file *file);
+#endif
+unsigned long io_uring_get_unmapped_area(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags);
+int io_uring_mmap(struct file *file, struct vm_area_struct *vma);
+
+void io_free_region(struct user_struct *user, struct io_mapped_region *mr);
+int io_create_region(struct io_ring_ctx *ctx, struct io_mapped_region *mr,
+ struct io_uring_region_desc *reg,
+ unsigned long mmap_offset);
+
+static inline void *io_region_get_ptr(struct io_mapped_region *mr)
+{
+ return mr->ptr;
+}
+
+static inline bool io_region_is_set(struct io_mapped_region *mr)
+{
+ return !!mr->nr_pages;
+}
+
+static inline void io_region_publish(struct io_ring_ctx *ctx,
+ struct io_mapped_region *src_region,
+ struct io_mapped_region *dst_region)
+{
+	/*
+	 * Once published, mmap can find the region while holding only
+	 * ->mmap_lock and not ->uring_lock.
+	 */
+ guard(mutex)(&ctx->mmap_lock);
+ *dst_region = *src_region;
+}
+
+static inline size_t io_region_size(struct io_mapped_region *mr)
+{
+ return (size_t) mr->nr_pages << PAGE_SHIFT;
+}
+
+#endif
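
For reference, the offsets handled above are the same values userspace passes to mmap(2) on the ring fd. A minimal sketch of mapping the SQ ring follows (not part of this patch; it assumes a ring created with io_uring_setup(2) using the default, kernel-allocated rings):

#include <linux/io_uring.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stddef.h>

/* Map the SQ ring of a freshly created io_uring instance. */
static int map_sq_ring(void)
{
	struct io_uring_params p = { 0 };
	int ring_fd = syscall(__NR_io_uring_setup, 8, &p);
	size_t sq_sz;
	void *sq_ptr;

	if (ring_fd < 0)
		return -1;

	/* SQ ring header plus the sq index array */
	sq_sz = p.sq_off.array + p.sq_entries * sizeof(unsigned);

	/*
	 * The fixed offset selects the region; the kernel resolves it via
	 * io_uring_validate_mmap_request() -> io_mmap_get_region().
	 */
	sq_ptr = mmap(NULL, sq_sz, PROT_READ | PROT_WRITE,
		      MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_SQ_RING);
	return sq_ptr == MAP_FAILED ? -1 : 0;
}
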
diff --git a/io_uring/mock_file.c b/io_uring/mock_file.c
new file mode 100644
index 000000000000..3ffac8f72974
--- /dev/null
+++ b/io_uring/mock_file.c
@@ -0,0 +1,350 @@
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/anon_inodes.h>
+#include <linux/ktime.h>
+#include <linux/hrtimer.h>
+#include <linux/poll.h>
+
+#include <linux/io_uring/cmd.h>
+#include <linux/io_uring_types.h>
+#include <uapi/linux/io_uring/mock_file.h>
+
+struct io_mock_iocb {
+ struct kiocb *iocb;
+ struct hrtimer timer;
+ int res;
+};
+
+struct io_mock_file {
+ size_t size;
+ u64 rw_delay_ns;
+ bool pollable;
+ struct wait_queue_head poll_wq;
+};
+
+#define IO_VALID_COPY_CMD_FLAGS IORING_MOCK_COPY_FROM
+
+static int io_copy_regbuf(struct iov_iter *reg_iter, void __user *ubuf)
+{
+ size_t ret, copied = 0;
+ size_t buflen = PAGE_SIZE;
+ void *tmp_buf;
+
+ tmp_buf = kzalloc(buflen, GFP_KERNEL);
+ if (!tmp_buf)
+ return -ENOMEM;
+
+ while (iov_iter_count(reg_iter)) {
+ size_t len = min(iov_iter_count(reg_iter), buflen);
+
+ if (iov_iter_rw(reg_iter) == ITER_SOURCE) {
+ ret = copy_from_iter(tmp_buf, len, reg_iter);
+ if (ret <= 0)
+ break;
+ if (copy_to_user(ubuf, tmp_buf, ret))
+ break;
+ } else {
+ if (copy_from_user(tmp_buf, ubuf, len))
+ break;
+ ret = copy_to_iter(tmp_buf, len, reg_iter);
+ if (ret <= 0)
+ break;
+ }
+ ubuf += ret;
+ copied += ret;
+ }
+
+ kfree(tmp_buf);
+ return copied;
+}
+
+static int io_cmd_copy_regbuf(struct io_uring_cmd *cmd, unsigned int issue_flags)
+{
+ const struct io_uring_sqe *sqe = cmd->sqe;
+ const struct iovec __user *iovec;
+ unsigned flags, iovec_len;
+ struct iov_iter iter;
+ void __user *ubuf;
+ int dir, ret;
+
+ ubuf = u64_to_user_ptr(READ_ONCE(sqe->addr3));
+ iovec = u64_to_user_ptr(READ_ONCE(sqe->addr));
+ iovec_len = READ_ONCE(sqe->len);
+ flags = READ_ONCE(sqe->file_index);
+
+ if (unlikely(sqe->ioprio || sqe->__pad1))
+ return -EINVAL;
+ if (flags & ~IO_VALID_COPY_CMD_FLAGS)
+ return -EINVAL;
+
+ dir = (flags & IORING_MOCK_COPY_FROM) ? ITER_SOURCE : ITER_DEST;
+ ret = io_uring_cmd_import_fixed_vec(cmd, iovec, iovec_len, dir, &iter,
+ issue_flags);
+ if (ret)
+ return ret;
+ ret = io_copy_regbuf(&iter, ubuf);
+ return ret ? ret : -EFAULT;
+}
+
+static int io_mock_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
+{
+ switch (cmd->cmd_op) {
+ case IORING_MOCK_CMD_COPY_REGBUF:
+ return io_cmd_copy_regbuf(cmd, issue_flags);
+ }
+ return -ENOTSUPP;
+}
+
+static enum hrtimer_restart io_mock_rw_timer_expired(struct hrtimer *timer)
+{
+ struct io_mock_iocb *mio = container_of(timer, struct io_mock_iocb, timer);
+ struct kiocb *iocb = mio->iocb;
+
+ WRITE_ONCE(iocb->private, NULL);
+ iocb->ki_complete(iocb, mio->res);
+ kfree(mio);
+ return HRTIMER_NORESTART;
+}
+
+static ssize_t io_mock_delay_rw(struct kiocb *iocb, size_t len)
+{
+ struct io_mock_file *mf = iocb->ki_filp->private_data;
+ struct io_mock_iocb *mio;
+
+ mio = kzalloc(sizeof(*mio), GFP_KERNEL);
+ if (!mio)
+ return -ENOMEM;
+
+ mio->iocb = iocb;
+ mio->res = len;
+ hrtimer_setup(&mio->timer, io_mock_rw_timer_expired,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer_start(&mio->timer, ns_to_ktime(mf->rw_delay_ns),
+ HRTIMER_MODE_REL);
+ return -EIOCBQUEUED;
+}
+
+static ssize_t io_mock_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+ struct io_mock_file *mf = iocb->ki_filp->private_data;
+ size_t len = iov_iter_count(to);
+ size_t nr_zeroed;
+
+ if (iocb->ki_pos + len > mf->size)
+ return -EINVAL;
+ nr_zeroed = iov_iter_zero(len, to);
+ if (!mf->rw_delay_ns || nr_zeroed != len)
+ return nr_zeroed;
+
+ return io_mock_delay_rw(iocb, len);
+}
+
+static ssize_t io_mock_write_iter(struct kiocb *iocb, struct iov_iter *from)
+{
+ struct io_mock_file *mf = iocb->ki_filp->private_data;
+ size_t len = iov_iter_count(from);
+
+ if (iocb->ki_pos + len > mf->size)
+ return -EINVAL;
+ if (!mf->rw_delay_ns) {
+ iov_iter_advance(from, len);
+ return len;
+ }
+
+ return io_mock_delay_rw(iocb, len);
+}
+
+static loff_t io_mock_llseek(struct file *file, loff_t offset, int whence)
+{
+ struct io_mock_file *mf = file->private_data;
+
+ return fixed_size_llseek(file, offset, whence, mf->size);
+}
+
+static __poll_t io_mock_poll(struct file *file, struct poll_table_struct *pt)
+{
+ struct io_mock_file *mf = file->private_data;
+ __poll_t mask = 0;
+
+ poll_wait(file, &mf->poll_wq, pt);
+
+ mask |= EPOLLOUT | EPOLLWRNORM;
+ mask |= EPOLLIN | EPOLLRDNORM;
+ return mask;
+}
+
+static int io_mock_release(struct inode *inode, struct file *file)
+{
+ struct io_mock_file *mf = file->private_data;
+
+ kfree(mf);
+ return 0;
+}
+
+static const struct file_operations io_mock_fops = {
+ .owner = THIS_MODULE,
+ .release = io_mock_release,
+ .uring_cmd = io_mock_cmd,
+ .read_iter = io_mock_read_iter,
+ .write_iter = io_mock_write_iter,
+ .llseek = io_mock_llseek,
+};
+
+static const struct file_operations io_mock_poll_fops = {
+ .owner = THIS_MODULE,
+ .release = io_mock_release,
+ .uring_cmd = io_mock_cmd,
+ .read_iter = io_mock_read_iter,
+ .write_iter = io_mock_write_iter,
+ .llseek = io_mock_llseek,
+ .poll = io_mock_poll,
+};
+
+#define IO_VALID_CREATE_FLAGS (IORING_MOCK_CREATE_F_SUPPORT_NOWAIT | \
+ IORING_MOCK_CREATE_F_POLL)
+
+static int io_create_mock_file(struct io_uring_cmd *cmd, unsigned int issue_flags)
+{
+ const struct file_operations *fops = &io_mock_fops;
+ const struct io_uring_sqe *sqe = cmd->sqe;
+ struct io_uring_mock_create mc, __user *uarg;
+ struct file *file;
+ struct io_mock_file *mf __free(kfree) = NULL;
+ size_t uarg_size;
+
+	/*
+	 * It's a testing-only driver that allows exercising edge cases
+	 * that wouldn't be possible to hit otherwise.
+	 */
+ add_taint(TAINT_TEST, LOCKDEP_STILL_OK);
+
+ uarg = u64_to_user_ptr(READ_ONCE(sqe->addr));
+ uarg_size = READ_ONCE(sqe->len);
+
+ if (sqe->ioprio || sqe->__pad1 || sqe->addr3 || sqe->file_index)
+ return -EINVAL;
+ if (uarg_size != sizeof(mc))
+ return -EINVAL;
+
+ memset(&mc, 0, sizeof(mc));
+ if (copy_from_user(&mc, uarg, uarg_size))
+ return -EFAULT;
+ if (!mem_is_zero(mc.__resv, sizeof(mc.__resv)))
+ return -EINVAL;
+ if (mc.flags & ~IO_VALID_CREATE_FLAGS)
+ return -EINVAL;
+ if (mc.file_size > SZ_1G)
+ return -EINVAL;
+ if (mc.rw_delay_ns > NSEC_PER_SEC)
+ return -EINVAL;
+
+ mf = kzalloc(sizeof(*mf), GFP_KERNEL_ACCOUNT);
+ if (!mf)
+ return -ENOMEM;
+
+ init_waitqueue_head(&mf->poll_wq);
+ mf->size = mc.file_size;
+ mf->rw_delay_ns = mc.rw_delay_ns;
+ if (mc.flags & IORING_MOCK_CREATE_F_POLL) {
+ fops = &io_mock_poll_fops;
+ mf->pollable = true;
+ }
+
+ FD_PREPARE(fdf, O_RDWR | O_CLOEXEC,
+ anon_inode_create_getfile("[io_uring_mock]", fops, mf,
+ O_RDWR | O_CLOEXEC, NULL));
+ if (fdf.err)
+ return fdf.err;
+
+ retain_and_null_ptr(mf);
+ file = fd_prepare_file(fdf);
+ file->f_mode |= FMODE_READ | FMODE_CAN_READ | FMODE_WRITE |
+ FMODE_CAN_WRITE | FMODE_LSEEK;
+ if (mc.flags & IORING_MOCK_CREATE_F_SUPPORT_NOWAIT)
+ file->f_mode |= FMODE_NOWAIT;
+
+ mc.out_fd = fd_prepare_fd(fdf);
+ if (copy_to_user(uarg, &mc, uarg_size))
+ return -EFAULT;
+
+ fd_publish(fdf);
+ return 0;
+}
+
+static int io_probe_mock(struct io_uring_cmd *cmd)
+{
+ const struct io_uring_sqe *sqe = cmd->sqe;
+ struct io_uring_mock_probe mp, __user *uarg;
+ size_t uarg_size;
+
+ uarg = u64_to_user_ptr(READ_ONCE(sqe->addr));
+ uarg_size = READ_ONCE(sqe->len);
+
+ if (sqe->ioprio || sqe->__pad1 || sqe->addr3 || sqe->file_index ||
+ uarg_size != sizeof(mp))
+ return -EINVAL;
+
+ memset(&mp, 0, sizeof(mp));
+ if (copy_from_user(&mp, uarg, uarg_size))
+ return -EFAULT;
+ if (!mem_is_zero(&mp, sizeof(mp)))
+ return -EINVAL;
+
+ mp.features = IORING_MOCK_FEAT_END;
+
+ if (copy_to_user(uarg, &mp, uarg_size))
+ return -EFAULT;
+ return 0;
+}
+
+static int iou_mock_mgr_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
+{
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ switch (cmd->cmd_op) {
+ case IORING_MOCK_MGR_CMD_PROBE:
+ return io_probe_mock(cmd);
+ case IORING_MOCK_MGR_CMD_CREATE:
+ return io_create_mock_file(cmd, issue_flags);
+ }
+ return -EOPNOTSUPP;
+}
+
+static const struct file_operations iou_mock_dev_fops = {
+ .owner = THIS_MODULE,
+ .uring_cmd = iou_mock_mgr_cmd,
+};
+
+static struct miscdevice iou_mock_miscdev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "io_uring_mock",
+ .fops = &iou_mock_dev_fops,
+};
+
+static int __init io_mock_init(void)
+{
+ int ret;
+
+ ret = misc_register(&iou_mock_miscdev);
+ if (ret < 0) {
+ pr_err("Could not initialize io_uring mock device\n");
+ return ret;
+ }
+ return 0;
+}
+
+static void __exit io_mock_exit(void)
+{
+ misc_deregister(&iou_mock_miscdev);
+}
+
+module_init(io_mock_init)
+module_exit(io_mock_exit)
+
+MODULE_AUTHOR("Pavel Begunkov <asml.silence@gmail.com>");
+MODULE_DESCRIPTION("io_uring mock file");
+MODULE_LICENSE("GPL");
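
For reference, a rough userspace sketch of driving the mock manager (not part of this patch). It assumes liburing is available, that the uapi header installs as <linux/io_uring/mock_file.h>, that the misc device appears as /dev/io_uring_mock, and that the caller has CAP_SYS_ADMIN:

#include <fcntl.h>
#include <string.h>
#include <liburing.h>
#include <linux/io_uring/mock_file.h>	/* assumed uapi install path */

/* Ask the mock manager device to create a pollable 1MiB mock file. */
static int create_mock_file(struct io_uring *ring)
{
	struct io_uring_mock_create mc = {
		.file_size = 1024 * 1024,
		.flags = IORING_MOCK_CREATE_F_POLL,
	};
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int dev_fd, ret;

	dev_fd = open("/dev/io_uring_mock", O_RDONLY);	/* assumed node name */
	if (dev_fd < 0)
		return -1;

	sqe = io_uring_get_sqe(ring);
	if (!sqe)
		return -1;
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_URING_CMD;
	sqe->fd = dev_fd;
	sqe->cmd_op = IORING_MOCK_MGR_CMD_CREATE;
	sqe->addr = (unsigned long)&mc;
	sqe->len = sizeof(mc);

	io_uring_submit(ring);
	if (io_uring_wait_cqe(ring, &cqe))
		return -1;
	ret = cqe->res;
	io_uring_cqe_seen(ring, cqe);

	/* On success the kernel fills mc.out_fd with the new mock file fd. */
	return ret ? -1 : (int)mc.out_fd;
}
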
diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c
index 15602a136821..7063ea7964e7 100644
--- a/io_uring/msg_ring.c
+++ b/io_uring/msg_ring.c
@@ -13,6 +13,10 @@
#include "filetable.h"
#include "msg_ring.h"
+/* All valid flags for MSG_RING */
+#define IORING_MSG_RING_MASK (IORING_MSG_RING_CQE_SKIP | \
+ IORING_MSG_RING_FLAGS_PASS)
+
struct io_msg {
struct file *file;
struct file *src_file;
@@ -21,7 +25,10 @@ struct io_msg {
u32 len;
u32 cmd;
u32 src_fd;
- u32 dst_fd;
+ union {
+ u32 dst_fd;
+ u32 cqe_flags;
+ };
u32 flags;
};
@@ -30,8 +37,8 @@ static void io_double_unlock_ctx(struct io_ring_ctx *octx)
mutex_unlock(&octx->uring_lock);
}
-static int io_double_lock_ctx(struct io_ring_ctx *octx,
- unsigned int issue_flags)
+static int io_lock_external_ctx(struct io_ring_ctx *octx,
+ unsigned int issue_flags)
{
/*
* To ensure proper ordering between the two ctxs, we can only
@@ -60,102 +67,111 @@ void io_msg_ring_cleanup(struct io_kiocb *req)
static inline bool io_msg_need_remote(struct io_ring_ctx *target_ctx)
{
- if (!target_ctx->task_complete)
- return false;
- return current != target_ctx->submitter_task;
+ return target_ctx->task_complete;
}
-static int io_msg_exec_remote(struct io_kiocb *req, task_work_func_t func)
+static void io_msg_tw_complete(struct io_tw_req tw_req, io_tw_token_t tw)
{
- struct io_ring_ctx *ctx = req->file->private_data;
- struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
- struct task_struct *task = READ_ONCE(ctx->submitter_task);
+ struct io_kiocb *req = tw_req.req;
+ struct io_ring_ctx *ctx = req->ctx;
- if (unlikely(!task))
- return -EOWNERDEAD;
+ io_add_aux_cqe(ctx, req->cqe.user_data, req->cqe.res, req->cqe.flags);
+ kfree_rcu(req, rcu_head);
+ percpu_ref_put(&ctx->refs);
+}
- init_task_work(&msg->tw, func);
- if (task_work_add(ctx->submitter_task, &msg->tw, TWA_SIGNAL))
+static int io_msg_remote_post(struct io_ring_ctx *ctx, struct io_kiocb *req,
+ int res, u32 cflags, u64 user_data)
+{
+ if (!READ_ONCE(ctx->submitter_task)) {
+ kfree_rcu(req, rcu_head);
return -EOWNERDEAD;
-
- return IOU_ISSUE_SKIP_COMPLETE;
+ }
+ req->opcode = IORING_OP_NOP;
+ req->cqe.user_data = user_data;
+ io_req_set_res(req, res, cflags);
+ percpu_ref_get(&ctx->refs);
+ req->ctx = ctx;
+ req->tctx = NULL;
+ req->io_task_work.func = io_msg_tw_complete;
+ io_req_task_work_add_remote(req, IOU_F_TWQ_LAZY_WAKE);
+ return 0;
}
-static void io_msg_tw_complete(struct callback_head *head)
+static int io_msg_data_remote(struct io_ring_ctx *target_ctx,
+ struct io_msg *msg)
{
- struct io_msg *msg = container_of(head, struct io_msg, tw);
- struct io_kiocb *req = cmd_to_io_kiocb(msg);
- struct io_ring_ctx *target_ctx = req->file->private_data;
- int ret = 0;
-
- if (current->flags & PF_EXITING) {
- ret = -EOWNERDEAD;
- } else {
- /*
- * If the target ring is using IOPOLL mode, then we need to be
- * holding the uring_lock for posting completions. Other ring
- * types rely on the regular completion locking, which is
- * handled while posting.
- */
- if (target_ctx->flags & IORING_SETUP_IOPOLL)
- mutex_lock(&target_ctx->uring_lock);
- if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
- ret = -EOVERFLOW;
- if (target_ctx->flags & IORING_SETUP_IOPOLL)
- mutex_unlock(&target_ctx->uring_lock);
- }
+ struct io_kiocb *target;
+ u32 flags = 0;
- if (ret < 0)
- req_set_fail(req);
- io_req_queue_tw_complete(req, ret);
+	target = kmem_cache_alloc(req_cachep, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
+ if (unlikely(!target))
+ return -ENOMEM;
+
+ if (msg->flags & IORING_MSG_RING_FLAGS_PASS)
+ flags = msg->cqe_flags;
+
+ return io_msg_remote_post(target_ctx, target, msg->len, flags,
+ msg->user_data);
}
-static int io_msg_ring_data(struct io_kiocb *req, unsigned int issue_flags)
+static int __io_msg_ring_data(struct io_ring_ctx *target_ctx,
+ struct io_msg *msg, unsigned int issue_flags)
{
- struct io_ring_ctx *target_ctx = req->file->private_data;
- struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
+ u32 flags = 0;
int ret;
- if (msg->src_fd || msg->dst_fd || msg->flags)
+ if (msg->src_fd || msg->flags & ~IORING_MSG_RING_FLAGS_PASS)
+ return -EINVAL;
+ if (!(msg->flags & IORING_MSG_RING_FLAGS_PASS) && msg->dst_fd)
return -EINVAL;
if (target_ctx->flags & IORING_SETUP_R_DISABLED)
return -EBADFD;
if (io_msg_need_remote(target_ctx))
- return io_msg_exec_remote(req, io_msg_tw_complete);
+ return io_msg_data_remote(target_ctx, msg);
+
+ if (msg->flags & IORING_MSG_RING_FLAGS_PASS)
+ flags = msg->cqe_flags;
ret = -EOVERFLOW;
if (target_ctx->flags & IORING_SETUP_IOPOLL) {
- if (unlikely(io_double_lock_ctx(target_ctx, issue_flags)))
+ if (unlikely(io_lock_external_ctx(target_ctx, issue_flags)))
return -EAGAIN;
- if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
- ret = 0;
- io_double_unlock_ctx(target_ctx);
- } else {
- if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
- ret = 0;
}
+ if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, flags))
+ ret = 0;
+ if (target_ctx->flags & IORING_SETUP_IOPOLL)
+ io_double_unlock_ctx(target_ctx);
return ret;
}
-static struct file *io_msg_grab_file(struct io_kiocb *req, unsigned int issue_flags)
+static int io_msg_ring_data(struct io_kiocb *req, unsigned int issue_flags)
+{
+ struct io_ring_ctx *target_ctx = req->file->private_data;
+ struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
+
+ return __io_msg_ring_data(target_ctx, msg, issue_flags);
+}
+
+static int io_msg_grab_file(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
struct io_ring_ctx *ctx = req->ctx;
- struct file *file = NULL;
- unsigned long file_ptr;
- int idx = msg->src_fd;
+ struct io_rsrc_node *node;
+ int ret = -EBADF;
io_ring_submit_lock(ctx, issue_flags);
- if (likely(idx < ctx->nr_user_files)) {
- idx = array_index_nospec(idx, ctx->nr_user_files);
- file_ptr = io_fixed_file_slot(&ctx->file_table, idx)->file_ptr;
- file = (struct file *) (file_ptr & FFS_MASK);
- if (file)
- get_file(file);
+ node = io_rsrc_node_lookup(&ctx->file_table.data, msg->src_fd);
+ if (node) {
+ msg->src_file = io_slot_file(node);
+ if (msg->src_file)
+ get_file(msg->src_file);
+ req->flags |= REQ_F_NEED_CLEANUP;
+ ret = 0;
}
io_ring_submit_unlock(ctx, issue_flags);
- return file;
+ return ret;
}
static int io_msg_install_complete(struct io_kiocb *req, unsigned int issue_flags)
@@ -165,7 +181,7 @@ static int io_msg_install_complete(struct io_kiocb *req, unsigned int issue_flag
struct file *src_file = msg->src_file;
int ret;
- if (unlikely(io_double_lock_ctx(target_ctx, issue_flags)))
+ if (unlikely(io_lock_external_ctx(target_ctx, issue_flags)))
return -EAGAIN;
ret = __io_fixed_fd_install(target_ctx, src_file, msg->dst_fd);
@@ -183,7 +199,7 @@ static int io_msg_install_complete(struct io_kiocb *req, unsigned int issue_flag
* completes with -EOVERFLOW, then the sender must ensure that a
* later IORING_OP_MSG_RING delivers the message.
*/
- if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
+ if (!io_post_aux_cqe(target_ctx, msg->user_data, ret, 0))
ret = -EOVERFLOW;
out_unlock:
io_double_unlock_ctx(target_ctx);
@@ -203,34 +219,47 @@ static void io_msg_tw_fd_complete(struct callback_head *head)
io_req_queue_tw_complete(req, ret);
}
+static int io_msg_fd_remote(struct io_kiocb *req)
+{
+ struct io_ring_ctx *ctx = req->file->private_data;
+ struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
+ struct task_struct *task = READ_ONCE(ctx->submitter_task);
+
+ if (unlikely(!task))
+ return -EOWNERDEAD;
+
+ init_task_work(&msg->tw, io_msg_tw_fd_complete);
+ if (task_work_add(task, &msg->tw, TWA_SIGNAL))
+ return -EOWNERDEAD;
+
+ return IOU_ISSUE_SKIP_COMPLETE;
+}
+
static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_ring_ctx *target_ctx = req->file->private_data;
struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
struct io_ring_ctx *ctx = req->ctx;
- struct file *src_file = msg->src_file;
+ if (msg->len)
+ return -EINVAL;
if (target_ctx == ctx)
return -EINVAL;
if (target_ctx->flags & IORING_SETUP_R_DISABLED)
return -EBADFD;
- if (!src_file) {
- src_file = io_msg_grab_file(req, issue_flags);
- if (!src_file)
- return -EBADF;
- msg->src_file = src_file;
- req->flags |= REQ_F_NEED_CLEANUP;
+ if (!msg->src_file) {
+ int ret = io_msg_grab_file(req, issue_flags);
+ if (unlikely(ret))
+ return ret;
}
if (io_msg_need_remote(target_ctx))
- return io_msg_exec_remote(req, io_msg_tw_fd_complete);
+ return io_msg_fd_remote(req);
return io_msg_install_complete(req, issue_flags);
}
-int io_msg_ring_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+static int __io_msg_ring_prep(struct io_msg *msg, const struct io_uring_sqe *sqe)
{
- struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
-
if (unlikely(sqe->buf_index || sqe->personality))
return -EINVAL;
@@ -241,12 +270,17 @@ int io_msg_ring_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
msg->src_fd = READ_ONCE(sqe->addr3);
msg->dst_fd = READ_ONCE(sqe->file_index);
msg->flags = READ_ONCE(sqe->msg_ring_flags);
- if (msg->flags & ~IORING_MSG_RING_CQE_SKIP)
+ if (msg->flags & ~IORING_MSG_RING_MASK)
return -EINVAL;
return 0;
}
+int io_msg_ring_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+ return __io_msg_ring_prep(io_kiocb_to_cmd(req, struct io_msg), sqe);
+}
+
int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
@@ -275,5 +309,30 @@ done:
req_set_fail(req);
}
io_req_set_res(req, ret, 0);
- return IOU_OK;
+ return IOU_COMPLETE;
+}
+
+int io_uring_sync_msg_ring(struct io_uring_sqe *sqe)
+{
+ struct io_msg io_msg = { };
+ int ret;
+
+ ret = __io_msg_ring_prep(&io_msg, sqe);
+ if (unlikely(ret))
+ return ret;
+
+ /*
+ * Only data sending supported, not IORING_MSG_SEND_FD as that one
+ * doesn't make sense without a source ring to send files from.
+ */
+ if (io_msg.cmd != IORING_MSG_DATA)
+ return -EINVAL;
+
+ CLASS(fd, f)(sqe->fd);
+ if (fd_empty(f))
+ return -EBADF;
+ if (!io_is_uring_fops(fd_file(f)))
+ return -EBADFD;
+ return __io_msg_ring_data(fd_file(f)->private_data,
+ &io_msg, IO_URING_F_UNLOCKED);
}
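
For reference, a minimal sketch of the userspace side of IORING_OP_MSG_RING (not part of this patch; it assumes liburing's io_uring_prep_msg_ring() helper). With IORING_MSG_RING_FLAGS_PASS, handled above, the sender can additionally choose the posted cqe->flags:

#include <liburing.h>

/*
 * Post a CQE into another ring: "len" becomes cqe->res and "data" becomes
 * cqe->user_data on the target side.
 */
static int notify_other_ring(struct io_uring *ring, int target_ring_fd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -1;
	io_uring_prep_msg_ring(sqe, target_ring_fd, /*len=*/42,
			       /*data=*/0xcafe, /*flags=*/0);
	return io_uring_submit(ring);
}
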
diff --git a/io_uring/msg_ring.h b/io_uring/msg_ring.h
index 3987ee6c0e5f..32236d2fb778 100644
--- a/io_uring/msg_ring.h
+++ b/io_uring/msg_ring.h
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
+int io_uring_sync_msg_ring(struct io_uring_sqe *sqe);
int io_msg_ring_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags);
void io_msg_ring_cleanup(struct io_kiocb *req);
diff --git a/io_uring/napi.c b/io_uring/napi.c
new file mode 100644
index 000000000000..4a10de03e426
--- /dev/null
+++ b/io_uring/napi.c
@@ -0,0 +1,396 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "io_uring.h"
+#include "napi.h"
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+
+/* Timeout for cleanup of stale entries. */
+#define NAPI_TIMEOUT (60 * SEC_CONVERSION)
+
+struct io_napi_entry {
+ unsigned int napi_id;
+ struct list_head list;
+
+ unsigned long timeout;
+ struct hlist_node node;
+
+ struct rcu_head rcu;
+};
+
+static struct io_napi_entry *io_napi_hash_find(struct hlist_head *hash_list,
+ unsigned int napi_id)
+{
+ struct io_napi_entry *e;
+
+ hlist_for_each_entry_rcu(e, hash_list, node) {
+ if (e->napi_id != napi_id)
+ continue;
+ return e;
+ }
+
+ return NULL;
+}
+
+static inline ktime_t net_to_ktime(unsigned long t)
+{
+ /* napi approximating usecs, reverse busy_loop_current_time */
+ return ns_to_ktime(t << 10);
+}
+
+int __io_napi_add_id(struct io_ring_ctx *ctx, unsigned int napi_id)
+{
+ struct hlist_head *hash_list;
+ struct io_napi_entry *e;
+
+ /* Non-NAPI IDs can be rejected. */
+ if (!napi_id_valid(napi_id))
+ return -EINVAL;
+
+ hash_list = &ctx->napi_ht[hash_min(napi_id, HASH_BITS(ctx->napi_ht))];
+
+ scoped_guard(rcu) {
+ e = io_napi_hash_find(hash_list, napi_id);
+ if (e) {
+ WRITE_ONCE(e->timeout, jiffies + NAPI_TIMEOUT);
+ return -EEXIST;
+ }
+ }
+
+ e = kmalloc(sizeof(*e), GFP_NOWAIT);
+ if (!e)
+ return -ENOMEM;
+
+ e->napi_id = napi_id;
+ e->timeout = jiffies + NAPI_TIMEOUT;
+
+	/*
+	 * guard(spinlock) is not used here so that the lock can be dropped
+	 * manually before calling kfree().
+	 */
+ spin_lock(&ctx->napi_lock);
+ if (unlikely(io_napi_hash_find(hash_list, napi_id))) {
+ spin_unlock(&ctx->napi_lock);
+ kfree(e);
+ return -EEXIST;
+ }
+
+ hlist_add_tail_rcu(&e->node, hash_list);
+ list_add_tail_rcu(&e->list, &ctx->napi_list);
+ spin_unlock(&ctx->napi_lock);
+ return 0;
+}
+
+static int __io_napi_del_id(struct io_ring_ctx *ctx, unsigned int napi_id)
+{
+ struct hlist_head *hash_list;
+ struct io_napi_entry *e;
+
+ /* Non-NAPI IDs can be rejected. */
+ if (!napi_id_valid(napi_id))
+ return -EINVAL;
+
+ hash_list = &ctx->napi_ht[hash_min(napi_id, HASH_BITS(ctx->napi_ht))];
+ guard(spinlock)(&ctx->napi_lock);
+ e = io_napi_hash_find(hash_list, napi_id);
+ if (!e)
+ return -ENOENT;
+
+ list_del_rcu(&e->list);
+ hash_del_rcu(&e->node);
+ kfree_rcu(e, rcu);
+ return 0;
+}
+
+static void __io_napi_remove_stale(struct io_ring_ctx *ctx)
+{
+ struct io_napi_entry *e;
+
+ guard(spinlock)(&ctx->napi_lock);
+ /*
+ * list_for_each_entry_safe() is not required as long as:
+ * 1. list_del_rcu() does not reset the deleted node next pointer
+ * 2. kfree_rcu() delays the memory freeing until the next quiescent
+ * state
+ */
+ list_for_each_entry(e, &ctx->napi_list, list) {
+ if (time_after(jiffies, READ_ONCE(e->timeout))) {
+ list_del_rcu(&e->list);
+ hash_del_rcu(&e->node);
+ kfree_rcu(e, rcu);
+ }
+ }
+}
+
+static inline void io_napi_remove_stale(struct io_ring_ctx *ctx, bool is_stale)
+{
+ if (is_stale)
+ __io_napi_remove_stale(ctx);
+}
+
+static inline bool io_napi_busy_loop_timeout(ktime_t start_time,
+ ktime_t bp)
+{
+ if (bp) {
+ ktime_t end_time = ktime_add(start_time, bp);
+ ktime_t now = net_to_ktime(busy_loop_current_time());
+
+ return ktime_after(now, end_time);
+ }
+
+ return true;
+}
+
+static bool io_napi_busy_loop_should_end(void *data,
+ unsigned long start_time)
+{
+ struct io_wait_queue *iowq = data;
+
+ if (signal_pending(current))
+ return true;
+ if (io_should_wake(iowq) || io_has_work(iowq->ctx))
+ return true;
+ if (io_napi_busy_loop_timeout(net_to_ktime(start_time),
+ iowq->napi_busy_poll_dt))
+ return true;
+
+ return false;
+}
+
+/*
+ * never report stale entries
+ */
+static bool static_tracking_do_busy_loop(struct io_ring_ctx *ctx,
+ bool (*loop_end)(void *, unsigned long),
+ void *loop_end_arg)
+{
+ struct io_napi_entry *e;
+
+ list_for_each_entry_rcu(e, &ctx->napi_list, list)
+ napi_busy_loop_rcu(e->napi_id, loop_end, loop_end_arg,
+ ctx->napi_prefer_busy_poll, BUSY_POLL_BUDGET);
+ return false;
+}
+
+static bool
+dynamic_tracking_do_busy_loop(struct io_ring_ctx *ctx,
+ bool (*loop_end)(void *, unsigned long),
+ void *loop_end_arg)
+{
+ struct io_napi_entry *e;
+ bool is_stale = false;
+
+ list_for_each_entry_rcu(e, &ctx->napi_list, list) {
+ napi_busy_loop_rcu(e->napi_id, loop_end, loop_end_arg,
+ ctx->napi_prefer_busy_poll, BUSY_POLL_BUDGET);
+
+ if (time_after(jiffies, READ_ONCE(e->timeout)))
+ is_stale = true;
+ }
+
+ return is_stale;
+}
+
+static inline bool
+__io_napi_do_busy_loop(struct io_ring_ctx *ctx,
+ bool (*loop_end)(void *, unsigned long),
+ void *loop_end_arg)
+{
+ if (READ_ONCE(ctx->napi_track_mode) == IO_URING_NAPI_TRACKING_STATIC)
+ return static_tracking_do_busy_loop(ctx, loop_end, loop_end_arg);
+ return dynamic_tracking_do_busy_loop(ctx, loop_end, loop_end_arg);
+}
+
+static void io_napi_blocking_busy_loop(struct io_ring_ctx *ctx,
+ struct io_wait_queue *iowq)
+{
+ unsigned long start_time = busy_loop_current_time();
+ bool (*loop_end)(void *, unsigned long) = NULL;
+ void *loop_end_arg = NULL;
+ bool is_stale = false;
+
+ /* Singular lists use a different napi loop end check function and are
+ * only executed once.
+ */
+ if (list_is_singular(&ctx->napi_list)) {
+ loop_end = io_napi_busy_loop_should_end;
+ loop_end_arg = iowq;
+ }
+
+ scoped_guard(rcu) {
+ do {
+ is_stale = __io_napi_do_busy_loop(ctx, loop_end,
+ loop_end_arg);
+ } while (!io_napi_busy_loop_should_end(iowq, start_time) &&
+ !loop_end_arg);
+ }
+
+ io_napi_remove_stale(ctx, is_stale);
+}
+
+/*
+ * io_napi_init() - Init napi settings
+ * @ctx: pointer to io-uring context structure
+ *
+ * Init napi settings in the io-uring context.
+ */
+void io_napi_init(struct io_ring_ctx *ctx)
+{
+ u64 sys_dt = READ_ONCE(sysctl_net_busy_poll) * NSEC_PER_USEC;
+
+ INIT_LIST_HEAD(&ctx->napi_list);
+ spin_lock_init(&ctx->napi_lock);
+ ctx->napi_prefer_busy_poll = false;
+ ctx->napi_busy_poll_dt = ns_to_ktime(sys_dt);
+ ctx->napi_track_mode = IO_URING_NAPI_TRACKING_INACTIVE;
+}
+
+/*
+ * io_napi_free() - Deallocate napi
+ * @ctx: pointer to io-uring context structure
+ *
+ * Free the napi list and the hash table in the io-uring context.
+ */
+void io_napi_free(struct io_ring_ctx *ctx)
+{
+ struct io_napi_entry *e;
+
+ guard(spinlock)(&ctx->napi_lock);
+ list_for_each_entry(e, &ctx->napi_list, list) {
+ hash_del_rcu(&e->node);
+ kfree_rcu(e, rcu);
+ }
+ INIT_LIST_HEAD_RCU(&ctx->napi_list);
+}
+
+static int io_napi_register_napi(struct io_ring_ctx *ctx,
+ struct io_uring_napi *napi)
+{
+ switch (napi->op_param) {
+ case IO_URING_NAPI_TRACKING_DYNAMIC:
+ case IO_URING_NAPI_TRACKING_STATIC:
+ break;
+ default:
+ return -EINVAL;
+ }
+ /* clean the napi list for new settings */
+ io_napi_free(ctx);
+ WRITE_ONCE(ctx->napi_track_mode, napi->op_param);
+ WRITE_ONCE(ctx->napi_busy_poll_dt, napi->busy_poll_to * NSEC_PER_USEC);
+ WRITE_ONCE(ctx->napi_prefer_busy_poll, !!napi->prefer_busy_poll);
+ return 0;
+}
+
+/*
+ * io_register_napi() - Register napi with io-uring
+ * @ctx: pointer to io-uring context structure
+ * @arg: pointer to io_uring_napi structure
+ *
+ * Register napi in the io-uring context.
+ */
+int io_register_napi(struct io_ring_ctx *ctx, void __user *arg)
+{
+ const struct io_uring_napi curr = {
+ .busy_poll_to = ktime_to_us(ctx->napi_busy_poll_dt),
+ .prefer_busy_poll = ctx->napi_prefer_busy_poll,
+ .op_param = ctx->napi_track_mode
+ };
+ struct io_uring_napi napi;
+
+ if (ctx->flags & IORING_SETUP_IOPOLL)
+ return -EINVAL;
+ if (copy_from_user(&napi, arg, sizeof(napi)))
+ return -EFAULT;
+ if (napi.pad[0] || napi.pad[1] || napi.resv)
+ return -EINVAL;
+
+ if (copy_to_user(arg, &curr, sizeof(curr)))
+ return -EFAULT;
+
+ switch (napi.opcode) {
+ case IO_URING_NAPI_REGISTER_OP:
+ return io_napi_register_napi(ctx, &napi);
+ case IO_URING_NAPI_STATIC_ADD_ID:
+ if (curr.op_param != IO_URING_NAPI_TRACKING_STATIC)
+ return -EINVAL;
+ return __io_napi_add_id(ctx, napi.op_param);
+ case IO_URING_NAPI_STATIC_DEL_ID:
+ if (curr.op_param != IO_URING_NAPI_TRACKING_STATIC)
+ return -EINVAL;
+ return __io_napi_del_id(ctx, napi.op_param);
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * io_unregister_napi() - Unregister napi with io-uring
+ * @ctx: pointer to io-uring context structure
+ * @arg: pointer to io_uring_napi structure
+ *
+ * Unregister napi. If arg has been specified copy the busy poll timeout and
+ * prefer busy poll setting to the passed in structure.
+ */
+int io_unregister_napi(struct io_ring_ctx *ctx, void __user *arg)
+{
+ const struct io_uring_napi curr = {
+ .busy_poll_to = ktime_to_us(ctx->napi_busy_poll_dt),
+ .prefer_busy_poll = ctx->napi_prefer_busy_poll
+ };
+
+ if (arg && copy_to_user(arg, &curr, sizeof(curr)))
+ return -EFAULT;
+
+ WRITE_ONCE(ctx->napi_busy_poll_dt, 0);
+ WRITE_ONCE(ctx->napi_prefer_busy_poll, false);
+ WRITE_ONCE(ctx->napi_track_mode, IO_URING_NAPI_TRACKING_INACTIVE);
+ return 0;
+}
+
+/*
+ * __io_napi_busy_loop() - execute busy poll loop
+ * @ctx: pointer to io-uring context structure
+ * @iowq: pointer to io wait queue
+ *
+ * Execute the busy poll loop and merge the spliced off list.
+ */
+void __io_napi_busy_loop(struct io_ring_ctx *ctx, struct io_wait_queue *iowq)
+{
+ if (ctx->flags & IORING_SETUP_SQPOLL)
+ return;
+
+ iowq->napi_busy_poll_dt = READ_ONCE(ctx->napi_busy_poll_dt);
+ if (iowq->timeout != KTIME_MAX) {
+ ktime_t dt = ktime_sub(iowq->timeout, io_get_time(ctx));
+
+ iowq->napi_busy_poll_dt = min_t(u64, iowq->napi_busy_poll_dt, dt);
+ }
+
+ iowq->napi_prefer_busy_poll = READ_ONCE(ctx->napi_prefer_busy_poll);
+ io_napi_blocking_busy_loop(ctx, iowq);
+}
+
+/*
+ * io_napi_sqpoll_busy_poll() - busy poll loop for sqpoll
+ * @ctx: pointer to io-uring context structure
+ *
+ * Splice off the napi list and execute the napi busy poll loop.
+ */
+int io_napi_sqpoll_busy_poll(struct io_ring_ctx *ctx)
+{
+ bool is_stale = false;
+
+ if (!READ_ONCE(ctx->napi_busy_poll_dt))
+ return 0;
+ if (list_empty_careful(&ctx->napi_list))
+ return 0;
+
+ scoped_guard(rcu) {
+ is_stale = __io_napi_do_busy_loop(ctx, NULL, NULL);
+ }
+
+ io_napi_remove_stale(ctx, is_stale);
+ return 1;
+}
+
+#endif
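
For reference, a rough sketch of enabling dynamic NAPI tracking from userspace (not part of this patch; it assumes liburing provides io_uring_register_napi(), otherwise the same structure can be passed to io_uring_register(2) with IORING_REGISTER_NAPI). Note that io_register_napi() above copies the previous settings back into the user structure:

#include <liburing.h>

/* Enable dynamic NAPI tracking with a 50us busy-poll budget. */
static int enable_napi_busy_poll(struct io_uring *ring)
{
	struct io_uring_napi napi = {
		.busy_poll_to = 50,		/* microseconds */
		.prefer_busy_poll = 1,
		.opcode = IO_URING_NAPI_REGISTER_OP,
		.op_param = IO_URING_NAPI_TRACKING_DYNAMIC,
	};

	/* wraps io_uring_register(2) with IORING_REGISTER_NAPI */
	return io_uring_register_napi(ring, &napi);
}
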
diff --git a/io_uring/napi.h b/io_uring/napi.h
new file mode 100644
index 000000000000..fa742f42e09b
--- /dev/null
+++ b/io_uring/napi.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef IOU_NAPI_H
+#define IOU_NAPI_H
+
+#include <linux/kernel.h>
+#include <linux/io_uring.h>
+#include <net/busy_poll.h>
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+
+void io_napi_init(struct io_ring_ctx *ctx);
+void io_napi_free(struct io_ring_ctx *ctx);
+
+int io_register_napi(struct io_ring_ctx *ctx, void __user *arg);
+int io_unregister_napi(struct io_ring_ctx *ctx, void __user *arg);
+
+int __io_napi_add_id(struct io_ring_ctx *ctx, unsigned int napi_id);
+
+void __io_napi_busy_loop(struct io_ring_ctx *ctx, struct io_wait_queue *iowq);
+int io_napi_sqpoll_busy_poll(struct io_ring_ctx *ctx);
+
+static inline bool io_napi(struct io_ring_ctx *ctx)
+{
+ return !list_empty(&ctx->napi_list);
+}
+
+static inline void io_napi_busy_loop(struct io_ring_ctx *ctx,
+ struct io_wait_queue *iowq)
+{
+ if (!io_napi(ctx))
+ return;
+ __io_napi_busy_loop(ctx, iowq);
+}
+
+/*
+ * io_napi_add() - Add napi id to the busy poll list
+ * @req: pointer to io_kiocb request
+ *
+ * Add the napi id of the socket to the napi busy poll list and hash table.
+ */
+static inline void io_napi_add(struct io_kiocb *req)
+{
+ struct io_ring_ctx *ctx = req->ctx;
+ struct socket *sock;
+
+ if (READ_ONCE(ctx->napi_track_mode) != IO_URING_NAPI_TRACKING_DYNAMIC)
+ return;
+
+ sock = sock_from_file(req->file);
+ if (sock && sock->sk)
+ __io_napi_add_id(ctx, READ_ONCE(sock->sk->sk_napi_id));
+}
+
+#else
+
+static inline void io_napi_init(struct io_ring_ctx *ctx)
+{
+}
+static inline void io_napi_free(struct io_ring_ctx *ctx)
+{
+}
+static inline int io_register_napi(struct io_ring_ctx *ctx, void __user *arg)
+{
+ return -EOPNOTSUPP;
+}
+static inline int io_unregister_napi(struct io_ring_ctx *ctx, void __user *arg)
+{
+ return -EOPNOTSUPP;
+}
+static inline bool io_napi(struct io_ring_ctx *ctx)
+{
+ return false;
+}
+static inline void io_napi_add(struct io_kiocb *req)
+{
+}
+static inline void io_napi_busy_loop(struct io_ring_ctx *ctx,
+ struct io_wait_queue *iowq)
+{
+}
+static inline int io_napi_sqpoll_busy_poll(struct io_ring_ctx *ctx)
+{
+ return 0;
+}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
+#endif
diff --git a/io_uring/net.c b/io_uring/net.c
index 90326b279965..519ea055b761 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -10,14 +10,15 @@
#include <uapi/linux/io_uring.h>
+#include "filetable.h"
#include "io_uring.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "net.h"
#include "notif.h"
#include "rsrc.h"
+#include "zcrx.h"
-#if defined(CONFIG_NET)
struct io_shutdown {
struct file *file;
int how;
@@ -28,6 +29,7 @@ struct io_accept {
struct sockaddr __user *addr;
int __user *addr_len;
int flags;
+ int iou_flags;
u32 file_slot;
unsigned long nofile;
};
@@ -47,6 +49,17 @@ struct io_connect {
struct sockaddr __user *addr;
int addr_len;
bool in_progress;
+ bool seen_econnaborted;
+};
+
+struct io_bind {
+ struct file *file;
+ int addr_len;
+};
+
+struct io_listen {
+ struct file *file;
+ int backlog;
};
struct io_sr_msg {
@@ -56,30 +69,56 @@ struct io_sr_msg {
struct user_msghdr __user *umsg;
void __user *buf;
};
- unsigned len;
+ int len;
unsigned done_io;
unsigned msg_flags;
+ unsigned nr_multishot_loops;
u16 flags;
/* initialised and used only by !msg send variants */
- u16 addr_len;
u16 buf_group;
- void __user *addr;
+ /* per-invocation mshot limit */
+ unsigned mshot_len;
+ /* overall mshot byte limit */
+ unsigned mshot_total_len;
+ void __user *msg_control;
/* used only for send zerocopy */
struct io_kiocb *notif;
};
-static inline bool io_check_multishot(struct io_kiocb *req,
- unsigned int issue_flags)
-{
- /*
- * When ->locked_cq is set we only allow to post CQEs from the original
- * task context. Usual request completions will be handled in other
- * generic paths but multipoll may decide to post extra cqes.
- */
- return !(issue_flags & IO_URING_F_IOWQ) ||
- !(issue_flags & IO_URING_F_MULTISHOT) ||
- !req->ctx->task_complete;
-}
+/*
+ * The UAPI flags are the lower 8 bits, as that's all sqe->ioprio will hold
+ * anyway. Use the upper 8 bits for internal uses.
+ */
+enum sr_retry_flags {
+ IORING_RECV_RETRY = (1U << 15),
+ IORING_RECV_PARTIAL_MAP = (1U << 14),
+ IORING_RECV_MSHOT_CAP = (1U << 13),
+ IORING_RECV_MSHOT_LIM = (1U << 12),
+ IORING_RECV_MSHOT_DONE = (1U << 11),
+
+ IORING_RECV_RETRY_CLEAR = IORING_RECV_RETRY | IORING_RECV_PARTIAL_MAP,
+ IORING_RECV_NO_RETRY = IORING_RECV_RETRY | IORING_RECV_PARTIAL_MAP |
+ IORING_RECV_MSHOT_CAP | IORING_RECV_MSHOT_DONE,
+};
+
+/*
+ * Number of times we'll try to do receives if there's more data. If we
+ * exceed this limit, then add us to the back of the queue and retry from
+ * there. This helps fairness between flooding clients.
+ */
+#define MULTISHOT_MAX_RETRY 32
+
+struct io_recvzc {
+ struct file *file;
+ u16 flags;
+ u32 len;
+ struct io_zcrx_ifq *ifq;
+};
+
+static int io_sg_from_iter_iovec(struct sk_buff *skb,
+ struct iov_iter *from, size_t length);
+static int io_sg_from_iter(struct sk_buff *skb,
+ struct iov_iter *from, size_t length);
int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
@@ -90,6 +129,7 @@ int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return -EINVAL;
shutdown->how = READ_ONCE(sqe->len);
+ req->flags |= REQ_F_FORCE_ASYNC;
return 0;
}
@@ -99,8 +139,7 @@ int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
struct socket *sock;
int ret;
- if (issue_flags & IO_URING_F_NONBLOCK)
- return -EAGAIN;
+ WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
sock = sock_from_file(req->file);
if (unlikely(!sock))
@@ -108,7 +147,7 @@ int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
ret = __sys_shutdown_sock(sock, shutdown->how);
io_req_set_res(req, ret, 0);
- return IOU_OK;
+ return IOU_COMPLETE;
}
static bool io_net_retry(struct socket *sock, int flags)
@@ -118,173 +157,387 @@ static bool io_net_retry(struct socket *sock, int flags)
return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
}
+static void io_netmsg_iovec_free(struct io_async_msghdr *kmsg)
+{
+ if (kmsg->vec.iovec)
+ io_vec_free(&kmsg->vec);
+}
+
static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_async_msghdr *hdr = req->async_data;
- if (!req_has_async_data(req) || issue_flags & IO_URING_F_UNLOCKED)
+ /* can't recycle, ensure we free the iovec if we have one */
+ if (unlikely(issue_flags & IO_URING_F_UNLOCKED)) {
+ io_netmsg_iovec_free(hdr);
return;
+ }
/* Let normal cleanup path reap it if we fail adding to the cache */
- if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) {
- req->async_data = NULL;
- req->flags &= ~REQ_F_ASYNC_DATA;
- }
+ io_alloc_cache_vec_kasan(&hdr->vec);
+ if (hdr->vec.nr > IO_VEC_CACHE_SOFT_CAP)
+ io_vec_free(&hdr->vec);
+
+ if (io_alloc_cache_put(&req->ctx->netmsg_cache, hdr))
+ io_req_async_data_clear(req, REQ_F_NEED_CLEANUP);
}
-static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
- unsigned int issue_flags)
+static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
- struct io_cache_entry *entry;
struct io_async_msghdr *hdr;
- if (!(issue_flags & IO_URING_F_UNLOCKED)) {
- entry = io_alloc_cache_get(&ctx->netmsg_cache);
- if (entry) {
- hdr = container_of(entry, struct io_async_msghdr, cache);
- hdr->free_iov = NULL;
- req->flags |= REQ_F_ASYNC_DATA;
- req->async_data = hdr;
- return hdr;
- }
- }
+ hdr = io_uring_alloc_async_data(&ctx->netmsg_cache, req);
+ if (!hdr)
+ return NULL;
- if (!io_alloc_async_data(req)) {
- hdr = req->async_data;
- hdr->free_iov = NULL;
- return hdr;
- }
- return NULL;
+ /* If the async data was cached, we might have an iov cached inside. */
+ if (hdr->vec.iovec)
+ req->flags |= REQ_F_NEED_CLEANUP;
+ return hdr;
}
-static inline struct io_async_msghdr *io_msg_alloc_async_prep(struct io_kiocb *req)
+static inline void io_mshot_prep_retry(struct io_kiocb *req,
+ struct io_async_msghdr *kmsg)
{
- /* ->prep_async is always called from the submission context */
- return io_msg_alloc_async(req, 0);
+ struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
+
+ req->flags &= ~REQ_F_BL_EMPTY;
+ sr->done_io = 0;
+ sr->flags &= ~IORING_RECV_RETRY_CLEAR;
+ sr->len = sr->mshot_len;
}
-static int io_setup_async_msg(struct io_kiocb *req,
- struct io_async_msghdr *kmsg,
- unsigned int issue_flags)
+static int io_net_import_vec(struct io_kiocb *req, struct io_async_msghdr *iomsg,
+ const struct iovec __user *uiov, unsigned uvec_seg,
+ int ddir)
{
- struct io_async_msghdr *async_msg;
+ struct iovec *iov;
+ int ret, nr_segs;
- if (req_has_async_data(req))
- return -EAGAIN;
- async_msg = io_msg_alloc_async(req, issue_flags);
- if (!async_msg) {
- kfree(kmsg->free_iov);
- return -ENOMEM;
- }
- req->flags |= REQ_F_NEED_CLEANUP;
- memcpy(async_msg, kmsg, sizeof(*kmsg));
- if (async_msg->msg.msg_name)
- async_msg->msg.msg_name = &async_msg->addr;
- /* if were using fast_iov, set it to the new one */
- if (!kmsg->free_iov) {
- size_t fast_idx = kmsg->msg.msg_iter.iov - kmsg->fast_iov;
- async_msg->msg.msg_iter.iov = &async_msg->fast_iov[fast_idx];
+ if (iomsg->vec.iovec) {
+ nr_segs = iomsg->vec.nr;
+ iov = iomsg->vec.iovec;
+ } else {
+ nr_segs = 1;
+ iov = &iomsg->fast_iov;
}
- return -EAGAIN;
+ ret = __import_iovec(ddir, uiov, uvec_seg, nr_segs, &iov,
+ &iomsg->msg.msg_iter, io_is_compat(req->ctx));
+ if (unlikely(ret < 0))
+ return ret;
+
+ if (iov) {
+ req->flags |= REQ_F_NEED_CLEANUP;
+ io_vec_reset_iovec(&iomsg->vec, iov, iomsg->msg.msg_iter.nr_segs);
+ }
+ return 0;
}
-static int io_sendmsg_copy_hdr(struct io_kiocb *req,
- struct io_async_msghdr *iomsg)
+static int io_compat_msg_copy_hdr(struct io_kiocb *req,
+ struct io_async_msghdr *iomsg,
+ struct compat_msghdr *msg, int ddir,
+ struct sockaddr __user **save_addr)
{
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
+ struct compat_iovec __user *uiov;
+ int ret;
- iomsg->msg.msg_name = &iomsg->addr;
- iomsg->free_iov = iomsg->fast_iov;
- return sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags,
- &iomsg->free_iov);
-}
+ if (copy_from_user(msg, sr->umsg_compat, sizeof(*msg)))
+ return -EFAULT;
-int io_send_prep_async(struct io_kiocb *req)
-{
- struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
- struct io_async_msghdr *io;
- int ret;
+ ret = __get_compat_msghdr(&iomsg->msg, msg, save_addr);
+ if (ret)
+ return ret;
- if (!zc->addr || req_has_async_data(req))
- return 0;
- io = io_msg_alloc_async_prep(req);
- if (!io)
- return -ENOMEM;
- ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
- return ret;
+ uiov = compat_ptr(msg->msg_iov);
+ if (req->flags & REQ_F_BUFFER_SELECT) {
+ if (msg->msg_iovlen == 0) {
+ sr->len = 0;
+ } else if (msg->msg_iovlen > 1) {
+ return -EINVAL;
+ } else {
+ struct compat_iovec tmp_iov;
+
+ if (copy_from_user(&tmp_iov, uiov, sizeof(tmp_iov)))
+ return -EFAULT;
+ sr->len = tmp_iov.iov_len;
+ }
+ }
+ return 0;
}
-static int io_setup_async_addr(struct io_kiocb *req,
- struct sockaddr_storage *addr_storage,
- unsigned int issue_flags)
+static int io_copy_msghdr_from_user(struct user_msghdr *msg,
+ struct user_msghdr __user *umsg)
{
- struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
- struct io_async_msghdr *io;
-
- if (!sr->addr || req_has_async_data(req))
- return -EAGAIN;
- io = io_msg_alloc_async(req, issue_flags);
- if (!io)
- return -ENOMEM;
- memcpy(&io->addr, addr_storage, sizeof(io->addr));
- return -EAGAIN;
+ if (!user_access_begin(umsg, sizeof(*umsg)))
+ return -EFAULT;
+ unsafe_get_user(msg->msg_name, &umsg->msg_name, ua_end);
+ unsafe_get_user(msg->msg_namelen, &umsg->msg_namelen, ua_end);
+ unsafe_get_user(msg->msg_iov, &umsg->msg_iov, ua_end);
+ unsafe_get_user(msg->msg_iovlen, &umsg->msg_iovlen, ua_end);
+ unsafe_get_user(msg->msg_control, &umsg->msg_control, ua_end);
+ unsafe_get_user(msg->msg_controllen, &umsg->msg_controllen, ua_end);
+ user_access_end();
+ return 0;
+ua_end:
+ user_access_end();
+ return -EFAULT;
}
-int io_sendmsg_prep_async(struct io_kiocb *req)
+static int io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg,
+ struct user_msghdr *msg, int ddir,
+ struct sockaddr __user **save_addr)
{
+ struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
+ struct user_msghdr __user *umsg = sr->umsg;
int ret;
- if (!io_msg_alloc_async_prep(req))
- return -ENOMEM;
- ret = io_sendmsg_copy_hdr(req, req->async_data);
- if (!ret)
- req->flags |= REQ_F_NEED_CLEANUP;
- return ret;
+ iomsg->msg.msg_name = &iomsg->addr;
+ iomsg->msg.msg_iter.nr_segs = 0;
+
+ if (io_is_compat(req->ctx)) {
+ struct compat_msghdr cmsg;
+
+ ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ddir, save_addr);
+ if (ret)
+ return ret;
+
+ memset(msg, 0, sizeof(*msg));
+ msg->msg_namelen = cmsg.msg_namelen;
+ msg->msg_controllen = cmsg.msg_controllen;
+ msg->msg_iov = compat_ptr(cmsg.msg_iov);
+ msg->msg_iovlen = cmsg.msg_iovlen;
+ return 0;
+ }
+
+ ret = io_copy_msghdr_from_user(msg, umsg);
+ if (unlikely(ret))
+ return ret;
+
+ msg->msg_flags = 0;
+
+ ret = __copy_msghdr(&iomsg->msg, msg, save_addr);
+ if (ret)
+ return ret;
+
+ if (req->flags & REQ_F_BUFFER_SELECT) {
+ if (msg->msg_iovlen == 0) {
+ sr->len = 0;
+ } else if (msg->msg_iovlen > 1) {
+ return -EINVAL;
+ } else {
+ struct iovec __user *uiov = msg->msg_iov;
+ struct iovec tmp_iov;
+
+ if (copy_from_user(&tmp_iov, uiov, sizeof(tmp_iov)))
+ return -EFAULT;
+ sr->len = tmp_iov.iov_len;
+ }
+ }
+ return 0;
}
void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
{
struct io_async_msghdr *io = req->async_data;
- kfree(io->free_iov);
+ io_netmsg_iovec_free(io);
}
-int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+static int io_send_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
+ struct io_async_msghdr *kmsg = req->async_data;
+ void __user *addr;
+ u16 addr_len;
+ int ret;
- if (req->opcode == IORING_OP_SEND) {
- if (READ_ONCE(sqe->__pad3[0]))
- return -EINVAL;
- sr->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
- sr->addr_len = READ_ONCE(sqe->addr_len);
- } else if (sqe->addr2 || sqe->file_index) {
+ sr->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
+
+ if (READ_ONCE(sqe->__pad3[0]))
return -EINVAL;
+
+ kmsg->msg.msg_name = NULL;
+ kmsg->msg.msg_namelen = 0;
+ kmsg->msg.msg_control = NULL;
+ kmsg->msg.msg_controllen = 0;
+ kmsg->msg.msg_ubuf = NULL;
+
+ addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
+ addr_len = READ_ONCE(sqe->addr_len);
+ if (addr) {
+ ret = move_addr_to_kernel(addr, addr_len, &kmsg->addr);
+ if (unlikely(ret < 0))
+ return ret;
+ kmsg->msg.msg_name = &kmsg->addr;
+ kmsg->msg.msg_namelen = addr_len;
+ }
+ if (sr->flags & IORING_RECVSEND_FIXED_BUF) {
+ req->flags |= REQ_F_IMPORT_BUFFER;
+ return 0;
}
+ if (req->flags & REQ_F_BUFFER_SELECT)
+ return 0;
+
+ if (sr->flags & IORING_SEND_VECTORIZED)
+ return io_net_import_vec(req, kmsg, sr->buf, sr->len, ITER_SOURCE);
+
+ return import_ubuf(ITER_SOURCE, sr->buf, sr->len, &kmsg->msg.msg_iter);
+}
+
+static int io_sendmsg_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+ struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
+ struct io_async_msghdr *kmsg = req->async_data;
+ struct user_msghdr msg;
+ int ret;
sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
+ ret = io_msg_copy_hdr(req, kmsg, &msg, ITER_SOURCE, NULL);
+ if (unlikely(ret))
+ return ret;
+ /* save msg_control as sys_sendmsg() overwrites it */
+ sr->msg_control = kmsg->msg.msg_control_user;
+
+ if (sr->flags & IORING_RECVSEND_FIXED_BUF) {
+ kmsg->msg.msg_iter.nr_segs = msg.msg_iovlen;
+ return io_prep_reg_iovec(req, &kmsg->vec, msg.msg_iov,
+ msg.msg_iovlen);
+ }
+ if (req->flags & REQ_F_BUFFER_SELECT)
+ return 0;
+ return io_net_import_vec(req, kmsg, msg.msg_iov, msg.msg_iovlen, ITER_SOURCE);
+}
+
+#define SENDMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_BUNDLE | IORING_SEND_VECTORIZED)
+
+int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+ struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
+
+ sr->done_io = 0;
sr->len = READ_ONCE(sqe->len);
sr->flags = READ_ONCE(sqe->ioprio);
- if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
+ if (sr->flags & ~SENDMSG_FLAGS)
return -EINVAL;
sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
if (sr->msg_flags & MSG_DONTWAIT)
req->flags |= REQ_F_NOWAIT;
+ if (req->flags & REQ_F_BUFFER_SELECT)
+ sr->buf_group = req->buf_index;
+ if (sr->flags & IORING_RECVSEND_BUNDLE) {
+ if (req->opcode == IORING_OP_SENDMSG)
+ return -EINVAL;
+ sr->msg_flags |= MSG_WAITALL;
+ req->flags |= REQ_F_MULTISHOT;
+ }
-#ifdef CONFIG_COMPAT
- if (req->ctx->compat)
+ if (io_is_compat(req->ctx))
sr->msg_flags |= MSG_CMSG_COMPAT;
-#endif
- sr->done_io = 0;
- return 0;
+
+ if (unlikely(!io_msg_alloc_async(req)))
+ return -ENOMEM;
+ if (req->opcode != IORING_OP_SENDMSG)
+ return io_send_setup(req, sqe);
+ if (unlikely(sqe->addr2 || sqe->file_index))
+ return -EINVAL;
+ return io_sendmsg_setup(req, sqe);
+}
+
+static void io_req_msg_cleanup(struct io_kiocb *req,
+ unsigned int issue_flags)
+{
+ io_netmsg_recycle(req, issue_flags);
+}
+
+/*
+ * For bundle completions, we need to figure out how many segments we consumed.
+ * A bundle could be using a single ITER_UBUF if that's all we mapped, or it
+ * could be using an ITER_IOVEC. For the latter, if we consumed all of
+ * the segments, then it's a trivial question to answer. If we have residual
+ * data in the iter, then loop over the segments to figure out how much we
+ * transferred.
+ */
+static int io_bundle_nbufs(struct io_async_msghdr *kmsg, int ret)
+{
+ struct iovec *iov;
+ int nbufs;
+
+ /* no data is always zero segments, and a ubuf is always 1 segment */
+ if (ret <= 0)
+ return 0;
+ if (iter_is_ubuf(&kmsg->msg.msg_iter))
+ return 1;
+
+ iov = kmsg->vec.iovec;
+ if (!iov)
+ iov = &kmsg->fast_iov;
+
+ /* if all data was transferred, it's basic pointer math */
+ if (!iov_iter_count(&kmsg->msg.msg_iter))
+ return iter_iov(&kmsg->msg.msg_iter) - iov;
+
+ /* short transfer, count segments */
+ nbufs = 0;
+ do {
+ int this_len = min_t(int, iov[nbufs].iov_len, ret);
+
+ nbufs++;
+ ret -= this_len;
+ } while (ret);
+
+ return nbufs;
+}
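
A worked example of the short-transfer accounting above (not part of the patch): with three 4KiB iovecs mapped for a bundle and a send that moved only 6KiB, the loop consumes min(4096, 6144) and then min(4096, 2048) bytes, so io_bundle_nbufs() reports two buffers consumed.
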
+
+static int io_net_kbuf_recyle(struct io_kiocb *req, struct io_buffer_list *bl,
+ struct io_async_msghdr *kmsg, int len)
+{
+ req->flags |= REQ_F_BL_NO_RECYCLE;
+ if (req->flags & REQ_F_BUFFERS_COMMIT)
+ io_kbuf_commit(req, bl, len, io_bundle_nbufs(kmsg, len));
+ return IOU_RETRY;
+}
+
+static inline bool io_send_finish(struct io_kiocb *req,
+ struct io_async_msghdr *kmsg,
+ struct io_br_sel *sel)
+{
+ struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
+ bool bundle_finished = sel->val <= 0;
+ unsigned int cflags;
+
+ if (!(sr->flags & IORING_RECVSEND_BUNDLE)) {
+ cflags = io_put_kbuf(req, sel->val, sel->buf_list);
+ goto finish;
+ }
+
+ cflags = io_put_kbufs(req, sel->val, sel->buf_list, io_bundle_nbufs(kmsg, sel->val));
+
+ if (bundle_finished || req->flags & REQ_F_BL_EMPTY)
+ goto finish;
+
+ /*
+	 * Fill the CQE for this send and see if we should keep trying to
+	 * send on this socket.
+ */
+ if (io_req_post_cqe(req, sel->val, cflags | IORING_CQE_F_MORE)) {
+ io_mshot_prep_retry(req, kmsg);
+ return false;
+ }
+
+ /* Otherwise stop bundle and use the current result. */
+finish:
+ io_req_set_res(req, sel->val, cflags);
+ sel->val = IOU_COMPLETE;
+ return true;
}
int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
- struct io_async_msghdr iomsg, *kmsg;
+ struct io_async_msghdr *kmsg = req->async_data;
struct socket *sock;
unsigned flags;
int min_ret = 0;
@@ -294,18 +547,9 @@ int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
if (unlikely(!sock))
return -ENOTSOCK;
- if (req_has_async_data(req)) {
- kmsg = req->async_data;
- } else {
- ret = io_sendmsg_copy_hdr(req, &iomsg);
- if (ret)
- return ret;
- kmsg = &iomsg;
- }
-
if (!(req->flags & REQ_F_POLLED) &&
(sr->flags & IORING_RECVSEND_POLL_FIRST))
- return io_setup_async_msg(req, kmsg, issue_flags);
+ return -EAGAIN;
flags = sr->msg_flags;
if (issue_flags & IO_URING_F_NONBLOCK)
@@ -313,94 +557,130 @@ int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
if (flags & MSG_WAITALL)
min_ret = iov_iter_count(&kmsg->msg.msg_iter);
+ kmsg->msg.msg_control_user = sr->msg_control;
+
ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
if (ret < min_ret) {
if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
- return io_setup_async_msg(req, kmsg, issue_flags);
+ return -EAGAIN;
if (ret > 0 && io_net_retry(sock, flags)) {
+ kmsg->msg.msg_controllen = 0;
+ kmsg->msg.msg_control = NULL;
sr->done_io += ret;
- req->flags |= REQ_F_PARTIAL_IO;
- return io_setup_async_msg(req, kmsg, issue_flags);
+ return -EAGAIN;
}
if (ret == -ERESTARTSYS)
ret = -EINTR;
req_set_fail(req);
}
- /* fast path, check for non-NULL to avoid function call */
- if (kmsg->free_iov)
- kfree(kmsg->free_iov);
- req->flags &= ~REQ_F_NEED_CLEANUP;
- io_netmsg_recycle(req, issue_flags);
+ io_req_msg_cleanup(req, issue_flags);
if (ret >= 0)
ret += sr->done_io;
else if (sr->done_io)
ret = sr->done_io;
io_req_set_res(req, ret, 0);
- return IOU_OK;
+ return IOU_COMPLETE;
}
-int io_send(struct io_kiocb *req, unsigned int issue_flags)
+static int io_send_select_buffer(struct io_kiocb *req, unsigned int issue_flags,
+ struct io_br_sel *sel, struct io_async_msghdr *kmsg)
{
- struct sockaddr_storage __address;
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
- struct msghdr msg;
- struct iovec iov;
- struct socket *sock;
- unsigned flags;
- int min_ret = 0;
+ struct buf_sel_arg arg = {
+ .iovs = &kmsg->fast_iov,
+ .max_len = min_not_zero(sr->len, INT_MAX),
+ .nr_iovs = 1,
+ .buf_group = sr->buf_group,
+ };
int ret;
- msg.msg_name = NULL;
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- msg.msg_namelen = 0;
- msg.msg_ubuf = NULL;
+ if (kmsg->vec.iovec) {
+ arg.nr_iovs = kmsg->vec.nr;
+ arg.iovs = kmsg->vec.iovec;
+ arg.mode = KBUF_MODE_FREE;
+ }
- if (sr->addr) {
- if (req_has_async_data(req)) {
- struct io_async_msghdr *io = req->async_data;
+ if (!(sr->flags & IORING_RECVSEND_BUNDLE))
+ arg.nr_iovs = 1;
+ else
+ arg.mode |= KBUF_MODE_EXPAND;
- msg.msg_name = &io->addr;
- } else {
- ret = move_addr_to_kernel(sr->addr, sr->addr_len, &__address);
- if (unlikely(ret < 0))
- return ret;
- msg.msg_name = (struct sockaddr *)&__address;
- }
- msg.msg_namelen = sr->addr_len;
+ ret = io_buffers_select(req, &arg, sel, issue_flags);
+ if (unlikely(ret < 0))
+ return ret;
+
+ if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->vec.iovec) {
+ kmsg->vec.nr = ret;
+ kmsg->vec.iovec = arg.iovs;
+ req->flags |= REQ_F_NEED_CLEANUP;
}
+ sr->len = arg.out_len;
- if (!(req->flags & REQ_F_POLLED) &&
- (sr->flags & IORING_RECVSEND_POLL_FIRST))
- return io_setup_async_addr(req, &__address, issue_flags);
+ if (ret == 1) {
+ sr->buf = arg.iovs[0].iov_base;
+ ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len,
+ &kmsg->msg.msg_iter);
+ if (unlikely(ret))
+ return ret;
+ } else {
+ iov_iter_init(&kmsg->msg.msg_iter, ITER_SOURCE,
+ arg.iovs, ret, arg.out_len);
+ }
+
+ return 0;
+}
+
+int io_send(struct io_kiocb *req, unsigned int issue_flags)
+{
+ struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
+ struct io_async_msghdr *kmsg = req->async_data;
+ struct io_br_sel sel = { };
+ struct socket *sock;
+ unsigned flags;
+ int min_ret = 0;
+ int ret;
sock = sock_from_file(req->file);
if (unlikely(!sock))
return -ENOTSOCK;
- ret = import_single_range(ITER_SOURCE, sr->buf, sr->len, &iov, &msg.msg_iter);
- if (unlikely(ret))
- return ret;
+ if (!(req->flags & REQ_F_POLLED) &&
+ (sr->flags & IORING_RECVSEND_POLL_FIRST))
+ return -EAGAIN;
flags = sr->msg_flags;
if (issue_flags & IO_URING_F_NONBLOCK)
flags |= MSG_DONTWAIT;
- if (flags & MSG_WAITALL)
- min_ret = iov_iter_count(&msg.msg_iter);
- msg.msg_flags = flags;
- ret = sock_sendmsg(sock, &msg);
+retry_bundle:
+ sel.buf_list = NULL;
+ if (io_do_buffer_select(req)) {
+ ret = io_send_select_buffer(req, issue_flags, &sel, kmsg);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * If MSG_WAITALL is set, or this is a bundle send, then we need
+	 * the full amount. If just bundle is set and we do a short send,
+	 * then we complete the bundle sequence rather than continue on.
+ */
+ if (flags & MSG_WAITALL || sr->flags & IORING_RECVSEND_BUNDLE)
+ min_ret = iov_iter_count(&kmsg->msg.msg_iter);
+
+ flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
+ kmsg->msg.msg_flags = flags;
+ ret = sock_sendmsg(sock, &kmsg->msg);
if (ret < min_ret) {
if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
- return io_setup_async_addr(req, &__address, issue_flags);
+ return -EAGAIN;
if (ret > 0 && io_net_retry(sock, flags)) {
sr->len -= ret;
sr->buf += ret;
sr->done_io += ret;
- req->flags |= REQ_F_PARTIAL_IO;
- return io_setup_async_addr(req, &__address, issue_flags);
+ return io_net_kbuf_recyle(req, sel.buf_list, kmsg, ret);
}
if (ret == -ERESTARTSYS)
ret = -EINTR;
@@ -410,236 +690,231 @@ int io_send(struct io_kiocb *req, unsigned int issue_flags)
ret += sr->done_io;
else if (sr->done_io)
ret = sr->done_io;
- io_req_set_res(req, ret, 0);
- return IOU_OK;
-}
-static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg)
-{
- int hdr;
+ sel.val = ret;
+ if (!io_send_finish(req, kmsg, &sel))
+ goto retry_bundle;
- if (iomsg->namelen < 0)
- return true;
- if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out),
- iomsg->namelen, &hdr))
- return true;
- if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr))
- return true;
-
- return false;
+ io_req_msg_cleanup(req, issue_flags);
+ return sel.val;
}
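
Editor's note: for reference, a bundled send is requested from userspace by combining provided buffers with IORING_RECVSEND_BUNDLE in sqe->ioprio, which is what the selection/retry logic above services. A minimal, hedged sketch (assumes headers new enough to expose the flag and a buffer ring already registered as group 0; the helper name is illustrative):

#include <liburing.h>
#include <sys/socket.h>

static int queue_bundle_send(struct io_uring *ring, int sockfd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -EBUSY;
	/* len == 0: take the length from the selected buffers */
	io_uring_prep_send(sqe, sockfd, NULL, 0, MSG_NOSIGNAL);
	sqe->flags |= IOSQE_BUFFER_SELECT;	/* pull data from a provided buffer group */
	sqe->buf_group = 0;			/* group registered beforehand */
	sqe->ioprio |= IORING_RECVSEND_BUNDLE;	/* allow multiple buffers per send */
	return io_uring_submit(ring);
}
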
-static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
- struct io_async_msghdr *iomsg)
+static int io_recvmsg_mshot_prep(struct io_kiocb *req,
+ struct io_async_msghdr *iomsg,
+ int namelen, size_t controllen)
{
- struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
- struct user_msghdr msg;
- int ret;
-
- if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg)))
- return -EFAULT;
-
- ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
- if (ret)
- return ret;
-
- if (req->flags & REQ_F_BUFFER_SELECT) {
- if (msg.msg_iovlen == 0) {
- sr->len = iomsg->fast_iov[0].iov_len = 0;
- iomsg->fast_iov[0].iov_base = NULL;
- iomsg->free_iov = NULL;
- } else if (msg.msg_iovlen > 1) {
- return -EINVAL;
- } else {
- if (copy_from_user(iomsg->fast_iov, msg.msg_iov, sizeof(*msg.msg_iov)))
- return -EFAULT;
- sr->len = iomsg->fast_iov[0].iov_len;
- iomsg->free_iov = NULL;
- }
-
- if (req->flags & REQ_F_APOLL_MULTISHOT) {
- iomsg->namelen = msg.msg_namelen;
- iomsg->controllen = msg.msg_controllen;
- if (io_recvmsg_multishot_overflow(iomsg))
- return -EOVERFLOW;
- }
- } else {
- iomsg->free_iov = iomsg->fast_iov;
- ret = __import_iovec(ITER_DEST, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
- &iomsg->free_iov, &iomsg->msg.msg_iter,
- false);
- if (ret > 0)
- ret = 0;
+ if ((req->flags & (REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) ==
+ (REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) {
+ int hdr;
+
+ if (unlikely(namelen < 0))
+ return -EOVERFLOW;
+ if (check_add_overflow(sizeof(struct io_uring_recvmsg_out),
+ namelen, &hdr))
+ return -EOVERFLOW;
+ if (check_add_overflow(hdr, controllen, &hdr))
+ return -EOVERFLOW;
+
+ iomsg->namelen = namelen;
+ iomsg->controllen = controllen;
+ return 0;
}
- return ret;
+ return 0;
}
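
Editor's note: the overflow check above mirrors the layout a multishot recvmsg writes into each selected buffer: a struct io_uring_recvmsg_out header, then the name area, then the control area, then the payload. A hedged userspace sketch of walking that layout; namelen/controllen are the sizes the application supplied in its original struct msghdr, matching what the prep code saves here:

#include <stdio.h>
#include <linux/io_uring.h>

/* buf points at a buffer filled by one multishot IORING_OP_RECVMSG completion */
static void dump_mshot_recvmsg(void *buf, unsigned namelen, unsigned controllen)
{
	struct io_uring_recvmsg_out *out = buf;
	/* header + name + control precede the payload; the same sum the
	 * kernel range-checks with check_add_overflow() at prep time */
	char *payload = (char *)buf + sizeof(*out) + namelen + controllen;

	printf("name %u control %u payload %u flags 0x%x\n",
	       out->namelen, out->controllen, out->payloadlen, out->flags);
	(void)payload;
}
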
-#ifdef CONFIG_COMPAT
-static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
- struct io_async_msghdr *iomsg)
+static int io_recvmsg_copy_hdr(struct io_kiocb *req,
+ struct io_async_msghdr *iomsg)
{
- struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
- struct compat_msghdr msg;
- struct compat_iovec __user *uiov;
+ struct user_msghdr msg;
int ret;
- if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg)))
- return -EFAULT;
-
- ret = __get_compat_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
- if (ret)
+ ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_DEST, &iomsg->uaddr);
+ if (unlikely(ret))
return ret;
- uiov = compat_ptr(msg.msg_iov);
- if (req->flags & REQ_F_BUFFER_SELECT) {
- compat_ssize_t clen;
-
- iomsg->free_iov = NULL;
- if (msg.msg_iovlen == 0) {
- sr->len = 0;
- } else if (msg.msg_iovlen > 1) {
- return -EINVAL;
- } else {
- if (!access_ok(uiov, sizeof(*uiov)))
- return -EFAULT;
- if (__get_user(clen, &uiov->iov_len))
- return -EFAULT;
- if (clen < 0)
- return -EINVAL;
- sr->len = clen;
- }
-
- if (req->flags & REQ_F_APOLL_MULTISHOT) {
- iomsg->namelen = msg.msg_namelen;
- iomsg->controllen = msg.msg_controllen;
- if (io_recvmsg_multishot_overflow(iomsg))
- return -EOVERFLOW;
- }
- } else {
- iomsg->free_iov = iomsg->fast_iov;
- ret = __import_iovec(ITER_DEST, (struct iovec __user *)uiov, msg.msg_iovlen,
- UIO_FASTIOV, &iomsg->free_iov,
- &iomsg->msg.msg_iter, true);
- if (ret < 0)
+ if (!(req->flags & REQ_F_BUFFER_SELECT)) {
+ ret = io_net_import_vec(req, iomsg, msg.msg_iov, msg.msg_iovlen,
+ ITER_DEST);
+ if (unlikely(ret))
return ret;
}
-
- return 0;
+ return io_recvmsg_mshot_prep(req, iomsg, msg.msg_namelen,
+ msg.msg_controllen);
}
-#endif
-static int io_recvmsg_copy_hdr(struct io_kiocb *req,
- struct io_async_msghdr *iomsg)
+static int io_recvmsg_prep_setup(struct io_kiocb *req)
{
- iomsg->msg.msg_name = &iomsg->addr;
-
-#ifdef CONFIG_COMPAT
- if (req->ctx->compat)
- return __io_compat_recvmsg_copy_hdr(req, iomsg);
-#endif
+ struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
+ struct io_async_msghdr *kmsg;
- return __io_recvmsg_copy_hdr(req, iomsg);
-}
+ kmsg = io_msg_alloc_async(req);
+ if (unlikely(!kmsg))
+ return -ENOMEM;
-int io_recvmsg_prep_async(struct io_kiocb *req)
-{
- int ret;
+ if (req->opcode == IORING_OP_RECV) {
+ kmsg->msg.msg_name = NULL;
+ kmsg->msg.msg_namelen = 0;
+ kmsg->msg.msg_inq = 0;
+ kmsg->msg.msg_control = NULL;
+ kmsg->msg.msg_get_inq = 1;
+ kmsg->msg.msg_controllen = 0;
+ kmsg->msg.msg_iocb = NULL;
+ kmsg->msg.msg_ubuf = NULL;
+
+ if (req->flags & REQ_F_BUFFER_SELECT)
+ return 0;
+ return import_ubuf(ITER_DEST, sr->buf, sr->len,
+ &kmsg->msg.msg_iter);
+ }
- if (!io_msg_alloc_async_prep(req))
- return -ENOMEM;
- ret = io_recvmsg_copy_hdr(req, req->async_data);
- if (!ret)
- req->flags |= REQ_F_NEED_CLEANUP;
- return ret;
+ return io_recvmsg_copy_hdr(req, kmsg);
}
-#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)
+#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT | \
+ IORING_RECVSEND_BUNDLE)
int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
- if (unlikely(sqe->file_index || sqe->addr2))
+ sr->done_io = 0;
+
+ if (unlikely(sqe->addr2))
return -EINVAL;
sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
sr->len = READ_ONCE(sqe->len);
sr->flags = READ_ONCE(sqe->ioprio);
- if (sr->flags & ~(RECVMSG_FLAGS))
+ if (sr->flags & ~RECVMSG_FLAGS)
return -EINVAL;
- sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
+ sr->msg_flags = READ_ONCE(sqe->msg_flags);
if (sr->msg_flags & MSG_DONTWAIT)
req->flags |= REQ_F_NOWAIT;
if (sr->msg_flags & MSG_ERRQUEUE)
req->flags |= REQ_F_CLEAR_POLLIN;
+ if (req->flags & REQ_F_BUFFER_SELECT)
+ sr->buf_group = req->buf_index;
+ sr->mshot_total_len = sr->mshot_len = 0;
if (sr->flags & IORING_RECV_MULTISHOT) {
if (!(req->flags & REQ_F_BUFFER_SELECT))
return -EINVAL;
if (sr->msg_flags & MSG_WAITALL)
return -EINVAL;
- if (req->opcode == IORING_OP_RECV && sr->len)
+ if (req->opcode == IORING_OP_RECV) {
+ sr->mshot_len = sr->len;
+ sr->mshot_total_len = READ_ONCE(sqe->optlen);
+ if (sr->mshot_total_len)
+ sr->flags |= IORING_RECV_MSHOT_LIM;
+ } else if (sqe->optlen) {
return -EINVAL;
+ }
req->flags |= REQ_F_APOLL_MULTISHOT;
- /*
- * Store the buffer group for this multishot receive separately,
- * as if we end up doing an io-wq based issue that selects a
- * buffer, it has to be committed immediately and that will
- * clear ->buf_list. This means we lose the link to the buffer
- * list, and the eventual buffer put on completion then cannot
- * restore it.
- */
- sr->buf_group = req->buf_index;
+ } else if (sqe->optlen) {
+ return -EINVAL;
}
-#ifdef CONFIG_COMPAT
- if (req->ctx->compat)
- sr->msg_flags |= MSG_CMSG_COMPAT;
-#endif
- sr->done_io = 0;
- return 0;
-}
+ if (sr->flags & IORING_RECVSEND_BUNDLE) {
+ if (req->opcode == IORING_OP_RECVMSG)
+ return -EINVAL;
+ }
-static inline void io_recv_prep_retry(struct io_kiocb *req)
-{
- struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
+ if (io_is_compat(req->ctx))
+ sr->msg_flags |= MSG_CMSG_COMPAT;
- sr->done_io = 0;
- sr->len = 0; /* get from the provided buffer */
- req->buf_index = sr->buf_group;
+ sr->nr_multishot_loops = 0;
+ return io_recvmsg_prep_setup(req);
}
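
Editor's note: with the sqe->optlen handling above, a multishot IORING_OP_RECV can carry a total byte cap after which the kernel ends the multishot (IORING_RECV_MSHOT_LIM internally). A hedged raw-SQE sketch, assuming installed headers expose sqe->optlen and a provided buffer group 0 is registered; a dedicated liburing helper for the cap is not assumed:

#include <liburing.h>

/* Receive in multishot mode, stopping once ~1 MB in total has been posted. */
static int queue_capped_multishot_recv(struct io_uring *ring, int sockfd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -EBUSY;
	io_uring_prep_recv_multishot(sqe, sockfd, NULL, 0, 0);
	sqe->flags |= IOSQE_BUFFER_SELECT;
	sqe->buf_group = 0;
	sqe->optlen = 1024 * 1024;	/* total-length cap, read at prep time above */
	return io_uring_submit(ring);
}
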
+/* bits to clear in old and inherit in new cflags on bundle retry */
+#define CQE_F_MASK (IORING_CQE_F_SOCK_NONEMPTY|IORING_CQE_F_MORE)
+
/*
* Finishes io_recv and io_recvmsg.
*
* Returns true if it is actually finished, or false if it should run
* again (for multishot).
*/
-static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
- unsigned int cflags, bool mshot_finished,
+static inline bool io_recv_finish(struct io_kiocb *req,
+ struct io_async_msghdr *kmsg,
+ struct io_br_sel *sel, bool mshot_finished,
unsigned issue_flags)
{
- if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
- io_req_set_res(req, *ret, cflags);
- *ret = IOU_OK;
- return true;
+ struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
+ unsigned int cflags = 0;
+
+ if (kmsg->msg.msg_inq > 0)
+ cflags |= IORING_CQE_F_SOCK_NONEMPTY;
+
+ if (sel->val > 0 && sr->flags & IORING_RECV_MSHOT_LIM) {
+ /*
+		 * If sr->mshot_total_len hits zero, the limit has been reached. Mark
+ * mshot as finished, and flag MSHOT_DONE as well to prevent
+ * a potential bundle from being retried.
+ */
+ sr->mshot_total_len -= min_t(int, sel->val, sr->mshot_total_len);
+ if (!sr->mshot_total_len) {
+ sr->flags |= IORING_RECV_MSHOT_DONE;
+ mshot_finished = true;
+ }
}
- if (!mshot_finished) {
- if (io_aux_cqe(req->ctx, issue_flags & IO_URING_F_COMPLETE_DEFER,
- req->cqe.user_data, *ret, cflags | IORING_CQE_F_MORE, true)) {
- io_recv_prep_retry(req);
+ if (sr->flags & IORING_RECVSEND_BUNDLE) {
+ size_t this_ret = sel->val - sr->done_io;
+
+ cflags |= io_put_kbufs(req, this_ret, sel->buf_list, io_bundle_nbufs(kmsg, this_ret));
+ if (sr->flags & IORING_RECV_RETRY)
+ cflags = req->cqe.flags | (cflags & CQE_F_MASK);
+ if (sr->mshot_len && sel->val >= sr->mshot_len)
+ sr->flags |= IORING_RECV_MSHOT_CAP;
+ /* bundle with no more immediate buffers, we're done */
+ if (req->flags & REQ_F_BL_EMPTY)
+ goto finish;
+ /*
+ * If more is available AND it was a full transfer, retry and
+ * append to this one
+ */
+ if (!(sr->flags & IORING_RECV_NO_RETRY) &&
+ kmsg->msg.msg_inq > 1 && this_ret > 0 &&
+ !iov_iter_count(&kmsg->msg.msg_iter)) {
+ req->cqe.flags = cflags & ~CQE_F_MASK;
+ sr->len = kmsg->msg.msg_inq;
+ sr->done_io += this_ret;
+ sr->flags |= IORING_RECV_RETRY;
return false;
}
- /* Otherwise stop multishot but use the current result. */
+ } else {
+ cflags |= io_put_kbuf(req, sel->val, sel->buf_list);
}
- io_req_set_res(req, *ret, cflags);
+ /*
+ * Fill CQE for this receive and see if we should keep trying to
+ * receive from this socket.
+ */
+ if ((req->flags & REQ_F_APOLL_MULTISHOT) && !mshot_finished &&
+ io_req_post_cqe(req, sel->val, cflags | IORING_CQE_F_MORE)) {
+ sel->val = IOU_RETRY;
+ io_mshot_prep_retry(req, kmsg);
+ /* Known not-empty or unknown state, retry */
+ if (cflags & IORING_CQE_F_SOCK_NONEMPTY || kmsg->msg.msg_inq < 0) {
+ if (sr->nr_multishot_loops++ < MULTISHOT_MAX_RETRY &&
+ !(sr->flags & IORING_RECV_MSHOT_CAP)) {
+ return false;
+ }
+ /* mshot retries exceeded, force a requeue */
+ sr->nr_multishot_loops = 0;
+ sr->flags &= ~IORING_RECV_MSHOT_CAP;
+ if (issue_flags & IO_URING_F_MULTISHOT)
+ sel->val = IOU_REQUEUE;
+ }
+ return true;
+ }
- if (issue_flags & IO_URING_F_MULTISHOT)
- *ret = IOU_STOP_MULTISHOT;
- else
- *ret = IOU_OK;
+ /* Finish the request / stop multishot. */
+finish:
+ io_req_set_res(req, sel->val, cflags);
+ sel->val = IOU_COMPLETE;
+ io_req_msg_cleanup(req, issue_flags);
return true;
}
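
Editor's note: io_recv_finish() above posts one CQE per receive (or per bundle) and uses IORING_CQE_F_MORE to signal that the multishot is still armed. A hedged sketch of the matching userspace consumption, assuming buffers come from a registered buffer ring so the buffer id is carried in cqe->flags:

#include <liburing.h>

static void drain_recv_cqes(struct io_uring *ring)
{
	struct io_uring_cqe *cqe;
	unsigned head, seen = 0;

	io_uring_for_each_cqe(ring, head, cqe) {
		seen++;
		if (cqe->res < 0) {
			/* error, or the multishot terminated with an error code */
		} else if (cqe->flags & IORING_CQE_F_BUFFER) {
			unsigned bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
			/* cqe->res bytes landed in buffer 'bid'; recycle it
			 * into the buffer ring once consumed */
			(void)bid;
		}
		if (!(cqe->flags & IORING_CQE_F_MORE)) {
			/* multishot finished, re-arm if still needed */
		}
	}
	io_uring_cq_advance(ring, seen);
}
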
@@ -730,9 +1005,9 @@ static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
- struct io_async_msghdr iomsg, *kmsg;
+ struct io_async_msghdr *kmsg = req->async_data;
+ struct io_br_sel sel = { };
struct socket *sock;
- unsigned int cflags;
unsigned flags;
int ret, min_ret = 0;
bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
@@ -742,72 +1017,56 @@ int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
if (unlikely(!sock))
return -ENOTSOCK;
- if (req_has_async_data(req)) {
- kmsg = req->async_data;
- } else {
- ret = io_recvmsg_copy_hdr(req, &iomsg);
- if (ret)
- return ret;
- kmsg = &iomsg;
- }
-
if (!(req->flags & REQ_F_POLLED) &&
(sr->flags & IORING_RECVSEND_POLL_FIRST))
- return io_setup_async_msg(req, kmsg, issue_flags);
+ return -EAGAIN;
- if (!io_check_multishot(req, issue_flags))
- return io_setup_async_msg(req, kmsg, issue_flags);
+ flags = sr->msg_flags;
+ if (force_nonblock)
+ flags |= MSG_DONTWAIT;
retry_multishot:
+ sel.buf_list = NULL;
if (io_do_buffer_select(req)) {
- void __user *buf;
size_t len = sr->len;
- buf = io_buffer_select(req, &len, issue_flags);
- if (!buf)
+ sel = io_buffer_select(req, &len, sr->buf_group, issue_flags);
+ if (!sel.addr)
return -ENOBUFS;
if (req->flags & REQ_F_APOLL_MULTISHOT) {
- ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
+ ret = io_recvmsg_prep_multishot(kmsg, sr, &sel.addr, &len);
if (ret) {
- io_kbuf_recycle(req, issue_flags);
+ io_kbuf_recycle(req, sel.buf_list, issue_flags);
return ret;
}
}
- kmsg->fast_iov[0].iov_base = buf;
- kmsg->fast_iov[0].iov_len = len;
- iov_iter_init(&kmsg->msg.msg_iter, ITER_DEST, kmsg->fast_iov, 1,
- len);
+ iov_iter_ubuf(&kmsg->msg.msg_iter, ITER_DEST, sel.addr, len);
}
- flags = sr->msg_flags;
- if (force_nonblock)
- flags |= MSG_DONTWAIT;
- if (flags & MSG_WAITALL)
- min_ret = iov_iter_count(&kmsg->msg.msg_iter);
-
kmsg->msg.msg_get_inq = 1;
- if (req->flags & REQ_F_APOLL_MULTISHOT)
+ kmsg->msg.msg_inq = -1;
+ if (req->flags & REQ_F_APOLL_MULTISHOT) {
ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
&mshot_finished);
- else
+ } else {
+ /* disable partial retry for recvmsg with cmsg attached */
+ if (flags & MSG_WAITALL && !kmsg->msg.msg_controllen)
+ min_ret = iov_iter_count(&kmsg->msg.msg_iter);
+
ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
kmsg->uaddr, flags);
+ }
if (ret < min_ret) {
if (ret == -EAGAIN && force_nonblock) {
- ret = io_setup_async_msg(req, kmsg, issue_flags);
- if (ret == -EAGAIN && (issue_flags & IO_URING_F_MULTISHOT)) {
- io_kbuf_recycle(req, issue_flags);
- return IOU_ISSUE_SKIP_COMPLETE;
- }
- return ret;
+ io_kbuf_recycle(req, sel.buf_list, issue_flags);
+ return IOU_RETRY;
}
if (ret > 0 && io_net_retry(sock, flags)) {
sr->done_io += ret;
- req->flags |= REQ_F_PARTIAL_IO;
- return io_setup_async_msg(req, kmsg, issue_flags);
+ return io_net_kbuf_recyle(req, sel.buf_list, kmsg, ret);
}
if (ret == -ERESTARTSYS)
ret = -EINTR;
@@ -821,131 +1080,233 @@ retry_multishot:
else if (sr->done_io)
ret = sr->done_io;
else
- io_kbuf_recycle(req, issue_flags);
-
- cflags = io_put_kbuf(req, issue_flags);
- if (kmsg->msg.msg_inq)
- cflags |= IORING_CQE_F_SOCK_NONEMPTY;
+ io_kbuf_recycle(req, sel.buf_list, issue_flags);
- if (!io_recv_finish(req, &ret, cflags, mshot_finished, issue_flags))
+ sel.val = ret;
+ if (!io_recv_finish(req, kmsg, &sel, mshot_finished, issue_flags))
goto retry_multishot;
- if (mshot_finished) {
- /* fast path, check for non-NULL to avoid function call */
- if (kmsg->free_iov)
- kfree(kmsg->free_iov);
- io_netmsg_recycle(req, issue_flags);
- req->flags &= ~REQ_F_NEED_CLEANUP;
+ return sel.val;
+}
+
+static int io_recv_buf_select(struct io_kiocb *req, struct io_async_msghdr *kmsg,
+ struct io_br_sel *sel, unsigned int issue_flags)
+{
+ struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
+ int ret;
+
+ /*
+ * If the ring isn't locked, then don't use the peek interface
+ * to grab multiple buffers as we will lock/unlock between
+ * this selection and posting the buffers.
+ */
+ if (!(issue_flags & IO_URING_F_UNLOCKED) &&
+ sr->flags & IORING_RECVSEND_BUNDLE) {
+ struct buf_sel_arg arg = {
+ .iovs = &kmsg->fast_iov,
+ .nr_iovs = 1,
+ .mode = KBUF_MODE_EXPAND,
+ .buf_group = sr->buf_group,
+ };
+
+ if (kmsg->vec.iovec) {
+ arg.nr_iovs = kmsg->vec.nr;
+ arg.iovs = kmsg->vec.iovec;
+ arg.mode |= KBUF_MODE_FREE;
+ }
+
+ if (sel->val)
+ arg.max_len = sel->val;
+ else if (kmsg->msg.msg_inq > 1)
+ arg.max_len = min_not_zero(sel->val, (ssize_t) kmsg->msg.msg_inq);
+
+ /* if mshot limited, ensure we don't go over */
+ if (sr->flags & IORING_RECV_MSHOT_LIM)
+ arg.max_len = min_not_zero(arg.max_len, sr->mshot_total_len);
+ ret = io_buffers_peek(req, &arg, sel);
+ if (unlikely(ret < 0))
+ return ret;
+
+ if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->vec.iovec) {
+ kmsg->vec.nr = ret;
+ kmsg->vec.iovec = arg.iovs;
+ req->flags |= REQ_F_NEED_CLEANUP;
+ }
+ if (arg.partial_map)
+ sr->flags |= IORING_RECV_PARTIAL_MAP;
+
+ /* special case 1 vec, can be a fast path */
+ if (ret == 1) {
+ sr->buf = arg.iovs[0].iov_base;
+ sr->len = arg.iovs[0].iov_len;
+ goto map_ubuf;
+ }
+ iov_iter_init(&kmsg->msg.msg_iter, ITER_DEST, arg.iovs, ret,
+ arg.out_len);
+ } else {
+ size_t len = sel->val;
+
+ *sel = io_buffer_select(req, &len, sr->buf_group, issue_flags);
+ if (!sel->addr)
+ return -ENOBUFS;
+ sr->buf = sel->addr;
+ sr->len = len;
+map_ubuf:
+ ret = import_ubuf(ITER_DEST, sr->buf, sr->len,
+ &kmsg->msg.msg_iter);
+ if (unlikely(ret))
+ return ret;
}
- return ret;
+ return 0;
}
int io_recv(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
- struct msghdr msg;
+ struct io_async_msghdr *kmsg = req->async_data;
+ struct io_br_sel sel;
struct socket *sock;
- struct iovec iov;
- unsigned int cflags;
unsigned flags;
int ret, min_ret = 0;
bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
- size_t len = sr->len;
+ bool mshot_finished;
if (!(req->flags & REQ_F_POLLED) &&
(sr->flags & IORING_RECVSEND_POLL_FIRST))
return -EAGAIN;
- if (!io_check_multishot(req, issue_flags))
- return -EAGAIN;
-
sock = sock_from_file(req->file);
if (unlikely(!sock))
return -ENOTSOCK;
+ flags = sr->msg_flags;
+ if (force_nonblock)
+ flags |= MSG_DONTWAIT;
+
retry_multishot:
+ sel.buf_list = NULL;
if (io_do_buffer_select(req)) {
- void __user *buf;
-
- buf = io_buffer_select(req, &len, issue_flags);
- if (!buf)
- return -ENOBUFS;
- sr->buf = buf;
+ sel.val = sr->len;
+ ret = io_recv_buf_select(req, kmsg, &sel, issue_flags);
+ if (unlikely(ret < 0)) {
+ kmsg->msg.msg_inq = -1;
+ goto out_free;
+ }
+ sr->buf = NULL;
}
- ret = import_single_range(ITER_DEST, sr->buf, len, &iov, &msg.msg_iter);
- if (unlikely(ret))
- goto out_free;
-
- msg.msg_name = NULL;
- msg.msg_namelen = 0;
- msg.msg_control = NULL;
- msg.msg_get_inq = 1;
- msg.msg_flags = 0;
- msg.msg_controllen = 0;
- msg.msg_iocb = NULL;
- msg.msg_ubuf = NULL;
+ kmsg->msg.msg_flags = 0;
+ kmsg->msg.msg_inq = -1;
- flags = sr->msg_flags;
- if (force_nonblock)
- flags |= MSG_DONTWAIT;
if (flags & MSG_WAITALL)
- min_ret = iov_iter_count(&msg.msg_iter);
+ min_ret = iov_iter_count(&kmsg->msg.msg_iter);
- ret = sock_recvmsg(sock, &msg, flags);
+ ret = sock_recvmsg(sock, &kmsg->msg, flags);
if (ret < min_ret) {
if (ret == -EAGAIN && force_nonblock) {
- if (issue_flags & IO_URING_F_MULTISHOT) {
- io_kbuf_recycle(req, issue_flags);
- return IOU_ISSUE_SKIP_COMPLETE;
- }
-
- return -EAGAIN;
+ io_kbuf_recycle(req, sel.buf_list, issue_flags);
+ return IOU_RETRY;
}
if (ret > 0 && io_net_retry(sock, flags)) {
sr->len -= ret;
sr->buf += ret;
sr->done_io += ret;
- req->flags |= REQ_F_PARTIAL_IO;
- return -EAGAIN;
+ return io_net_kbuf_recyle(req, sel.buf_list, kmsg, ret);
}
if (ret == -ERESTARTSYS)
ret = -EINTR;
req_set_fail(req);
- } else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
+ } else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
out_free:
req_set_fail(req);
}
+ mshot_finished = ret <= 0;
if (ret > 0)
ret += sr->done_io;
else if (sr->done_io)
ret = sr->done_io;
else
- io_kbuf_recycle(req, issue_flags);
-
- cflags = io_put_kbuf(req, issue_flags);
- if (msg.msg_inq)
- cflags |= IORING_CQE_F_SOCK_NONEMPTY;
+ io_kbuf_recycle(req, sel.buf_list, issue_flags);
- if (!io_recv_finish(req, &ret, cflags, ret <= 0, issue_flags))
+ sel.val = ret;
+ if (!io_recv_finish(req, kmsg, &sel, mshot_finished, issue_flags))
goto retry_multishot;
- return ret;
+ return sel.val;
+}
+
+int io_recvzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+ struct io_recvzc *zc = io_kiocb_to_cmd(req, struct io_recvzc);
+ unsigned ifq_idx;
+
+ if (unlikely(sqe->addr2 || sqe->addr || sqe->addr3))
+ return -EINVAL;
+
+ ifq_idx = READ_ONCE(sqe->zcrx_ifq_idx);
+ zc->ifq = xa_load(&req->ctx->zcrx_ctxs, ifq_idx);
+ if (!zc->ifq)
+ return -EINVAL;
+
+ zc->len = READ_ONCE(sqe->len);
+ zc->flags = READ_ONCE(sqe->ioprio);
+ if (READ_ONCE(sqe->msg_flags))
+ return -EINVAL;
+ if (zc->flags & ~(IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT))
+ return -EINVAL;
+ /* multishot required */
+ if (!(zc->flags & IORING_RECV_MULTISHOT))
+ return -EINVAL;
+ /* All data completions are posted as aux CQEs. */
+ req->flags |= REQ_F_APOLL_MULTISHOT;
+
+ return 0;
+}
+
+int io_recvzc(struct io_kiocb *req, unsigned int issue_flags)
+{
+ struct io_recvzc *zc = io_kiocb_to_cmd(req, struct io_recvzc);
+ struct socket *sock;
+ unsigned int len;
+ int ret;
+
+ if (!(req->flags & REQ_F_POLLED) &&
+ (zc->flags & IORING_RECVSEND_POLL_FIRST))
+ return -EAGAIN;
+
+ sock = sock_from_file(req->file);
+ if (unlikely(!sock))
+ return -ENOTSOCK;
+
+ len = zc->len;
+ ret = io_zcrx_recv(req, zc->ifq, sock, 0, issue_flags, &zc->len);
+ if (len && zc->len == 0) {
+ io_req_set_res(req, 0, 0);
+
+ return IOU_COMPLETE;
+ }
+ if (unlikely(ret <= 0) && ret != -EAGAIN) {
+ if (ret == -ERESTARTSYS)
+ ret = -EINTR;
+ if (ret == IOU_REQUEUE)
+ return IOU_REQUEUE;
+
+ req_set_fail(req);
+ io_req_set_res(req, ret, 0);
+ return IOU_COMPLETE;
+ }
+ return IOU_RETRY;
}
void io_send_zc_cleanup(struct io_kiocb *req)
{
struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
- struct io_async_msghdr *io;
+ struct io_async_msghdr *io = req->async_data;
- if (req_has_async_data(req)) {
- io = req->async_data;
- /* might be ->fast_iov if *msg_copy_hdr failed */
- if (io->free_iov != io->fast_iov)
- kfree(io->free_iov);
- }
+ if (req_has_async_data(req))
+ io_netmsg_iovec_free(io);
if (zc->notif) {
io_notif_flush(zc->notif);
zc->notif = NULL;
@@ -953,13 +1314,18 @@ void io_send_zc_cleanup(struct io_kiocb *req)
}
#define IO_ZC_FLAGS_COMMON (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_FIXED_BUF)
-#define IO_ZC_FLAGS_VALID (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE)
+#define IO_ZC_FLAGS_VALID (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE | \
+ IORING_SEND_VECTORIZED)
int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
struct io_ring_ctx *ctx = req->ctx;
+ struct io_async_msghdr *iomsg;
struct io_kiocb *notif;
+ int ret;
+
+ zc->done_io = 0;
if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
return -EINVAL;
@@ -973,63 +1339,60 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
notif->cqe.user_data = req->cqe.user_data;
notif->cqe.res = 0;
notif->cqe.flags = IORING_CQE_F_NOTIF;
- req->flags |= REQ_F_NEED_CLEANUP;
+ req->flags |= REQ_F_NEED_CLEANUP | REQ_F_POLL_NO_LAZY;
zc->flags = READ_ONCE(sqe->ioprio);
if (unlikely(zc->flags & ~IO_ZC_FLAGS_COMMON)) {
if (zc->flags & ~IO_ZC_FLAGS_VALID)
return -EINVAL;
if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) {
- io_notif_set_extended(notif);
- io_notif_to_data(notif)->zc_report = true;
+ struct io_notif_data *nd = io_notif_to_data(notif);
+
+ nd->zc_report = true;
+ nd->zc_used = false;
+ nd->zc_copied = false;
}
}
- if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
- unsigned idx = READ_ONCE(sqe->buf_index);
+ zc->len = READ_ONCE(sqe->len);
+ zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL | MSG_ZEROCOPY;
+ req->buf_index = READ_ONCE(sqe->buf_index);
+ if (zc->msg_flags & MSG_DONTWAIT)
+ req->flags |= REQ_F_NOWAIT;
+
+ if (io_is_compat(req->ctx))
+ zc->msg_flags |= MSG_CMSG_COMPAT;
- if (unlikely(idx >= ctx->nr_user_bufs))
- return -EFAULT;
- idx = array_index_nospec(idx, ctx->nr_user_bufs);
- req->imu = READ_ONCE(ctx->user_bufs[idx]);
- io_req_set_rsrc_node(notif, ctx, 0);
- }
+ iomsg = io_msg_alloc_async(req);
+ if (unlikely(!iomsg))
+ return -ENOMEM;
if (req->opcode == IORING_OP_SEND_ZC) {
- if (READ_ONCE(sqe->__pad3[0]))
- return -EINVAL;
- zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
- zc->addr_len = READ_ONCE(sqe->addr_len);
+ ret = io_send_setup(req, sqe);
} else {
if (unlikely(sqe->addr2 || sqe->file_index))
return -EINVAL;
- if (unlikely(zc->flags & IORING_RECVSEND_FIXED_BUF))
- return -EINVAL;
+ ret = io_sendmsg_setup(req, sqe);
}
+ if (unlikely(ret))
+ return ret;
- zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
- zc->len = READ_ONCE(sqe->len);
- zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
- if (zc->msg_flags & MSG_DONTWAIT)
- req->flags |= REQ_F_NOWAIT;
-
- zc->done_io = 0;
-
-#ifdef CONFIG_COMPAT
- if (req->ctx->compat)
- zc->msg_flags |= MSG_CMSG_COMPAT;
-#endif
+ if (!(zc->flags & IORING_RECVSEND_FIXED_BUF)) {
+ iomsg->msg.sg_from_iter = io_sg_from_iter_iovec;
+ return io_notif_account_mem(zc->notif, iomsg->msg.msg_iter.count);
+ }
+ iomsg->msg.sg_from_iter = io_sg_from_iter;
return 0;
}
-static int io_sg_from_iter_iovec(struct sock *sk, struct sk_buff *skb,
+static int io_sg_from_iter_iovec(struct sk_buff *skb,
struct iov_iter *from, size_t length)
{
skb_zcopy_downgrade_managed(skb);
- return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
+ return zerocopy_fill_skb_from_iter(skb, from, length);
}
-static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
+static int io_sg_from_iter(struct sk_buff *skb,
struct iov_iter *from, size_t length)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
@@ -1042,7 +1405,7 @@ static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
if (!frag)
shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
else if (unlikely(!skb_zcopy_managed(skb)))
- return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
+ return zerocopy_fill_skb_from_iter(skb, from, length);
bi.bi_size = min(from->count, length);
bi.bi_bvec_done = from->iov_offset;
@@ -1069,23 +1432,26 @@ static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
skb->data_len += copied;
skb->len += copied;
skb->truesize += truesize;
-
- if (sk && sk->sk_type == SOCK_STREAM) {
- sk_wmem_queued_add(sk, truesize);
- if (!skb_zcopy_pure(skb))
- sk_mem_charge(sk, truesize);
- } else {
- refcount_add(truesize, &skb->sk->sk_wmem_alloc);
- }
return ret;
}
+static int io_send_zc_import(struct io_kiocb *req, unsigned int issue_flags)
+{
+ struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
+ struct io_async_msghdr *kmsg = req->async_data;
+
+ WARN_ON_ONCE(!(sr->flags & IORING_RECVSEND_FIXED_BUF));
+
+ sr->notif->buf_index = req->buf_index;
+ return io_import_reg_buf(sr->notif, &kmsg->msg.msg_iter,
+ (u64)(uintptr_t)sr->buf, sr->len,
+ ITER_SOURCE, issue_flags);
+}
+
int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
{
- struct sockaddr_storage __address;
struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
- struct msghdr msg;
- struct iovec iov;
+ struct io_async_msghdr *kmsg = req->async_data;
struct socket *sock;
unsigned msg_flags;
int ret, min_ret = 0;
@@ -1096,67 +1462,37 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
return -EOPNOTSUPP;
- msg.msg_name = NULL;
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- msg.msg_namelen = 0;
-
- if (zc->addr) {
- if (req_has_async_data(req)) {
- struct io_async_msghdr *io = req->async_data;
-
- msg.msg_name = &io->addr;
- } else {
- ret = move_addr_to_kernel(zc->addr, zc->addr_len, &__address);
- if (unlikely(ret < 0))
- return ret;
- msg.msg_name = (struct sockaddr *)&__address;
- }
- msg.msg_namelen = zc->addr_len;
- }
-
if (!(req->flags & REQ_F_POLLED) &&
(zc->flags & IORING_RECVSEND_POLL_FIRST))
- return io_setup_async_addr(req, &__address, issue_flags);
+ return -EAGAIN;
- if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
- ret = io_import_fixed(ITER_SOURCE, &msg.msg_iter, req->imu,
- (u64)(uintptr_t)zc->buf, zc->len);
- if (unlikely(ret))
- return ret;
- msg.sg_from_iter = io_sg_from_iter;
- } else {
- io_notif_set_extended(zc->notif);
- ret = import_single_range(ITER_SOURCE, zc->buf, zc->len, &iov,
- &msg.msg_iter);
- if (unlikely(ret))
- return ret;
- ret = io_notif_account_mem(zc->notif, zc->len);
+ if (req->flags & REQ_F_IMPORT_BUFFER) {
+ req->flags &= ~REQ_F_IMPORT_BUFFER;
+ ret = io_send_zc_import(req, issue_flags);
if (unlikely(ret))
return ret;
- msg.sg_from_iter = io_sg_from_iter_iovec;
}
- msg_flags = zc->msg_flags | MSG_ZEROCOPY;
+ msg_flags = zc->msg_flags;
if (issue_flags & IO_URING_F_NONBLOCK)
msg_flags |= MSG_DONTWAIT;
if (msg_flags & MSG_WAITALL)
- min_ret = iov_iter_count(&msg.msg_iter);
+ min_ret = iov_iter_count(&kmsg->msg.msg_iter);
+ msg_flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
- msg.msg_flags = msg_flags;
- msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
- ret = sock_sendmsg(sock, &msg);
+ kmsg->msg.msg_flags = msg_flags;
+ kmsg->msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
+ ret = sock_sendmsg(sock, &kmsg->msg);
if (unlikely(ret < min_ret)) {
if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
- return io_setup_async_addr(req, &__address, issue_flags);
+ return -EAGAIN;
- if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
+ if (ret > 0 && io_net_retry(sock, kmsg->msg.msg_flags)) {
zc->len -= ret;
zc->buf += ret;
zc->done_io += ret;
- req->flags |= REQ_F_PARTIAL_IO;
- return io_setup_async_addr(req, &__address, issue_flags);
+ return -EAGAIN;
}
if (ret == -ERESTARTSYS)
ret = -EINTR;
@@ -1174,21 +1510,33 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
*/
if (!(issue_flags & IO_URING_F_UNLOCKED)) {
io_notif_flush(zc->notif);
- req->flags &= ~REQ_F_NEED_CLEANUP;
+ zc->notif = NULL;
+ io_req_msg_cleanup(req, 0);
}
io_req_set_res(req, ret, IORING_CQE_F_MORE);
- return IOU_OK;
+ return IOU_COMPLETE;
}
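
Editor's note: a zero-copy send completes in two steps: the request CQE (posted here with IORING_CQE_F_MORE) and, once the network stack has released the pages, a notification CQE flagged IORING_CQE_F_NOTIF. A hedged liburing sketch of issuing one and reaping both:

#include <liburing.h>
#include <sys/socket.h>

static int send_zc_once(struct io_uring *ring, int sockfd,
			const void *buf, size_t len)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int i, ret;

	if (!sqe)
		return -EBUSY;
	io_uring_prep_send_zc(sqe, sockfd, buf, len, MSG_NOSIGNAL, 0);
	io_uring_submit(ring);

	/* one CQE for the send result, one notification once the buffer
	 * may be reused */
	for (i = 0; i < 2; i++) {
		ret = io_uring_wait_cqe(ring, &cqe);
		if (ret)
			return ret;
		if (cqe->flags & IORING_CQE_F_NOTIF) {
			/* buf can be reused or freed from this point on */
		}
		io_uring_cqe_seen(ring, cqe);
	}
	return 0;
}
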
int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
- struct io_async_msghdr iomsg, *kmsg;
+ struct io_async_msghdr *kmsg = req->async_data;
struct socket *sock;
unsigned flags;
int ret, min_ret = 0;
- io_notif_set_extended(sr->notif);
+ if (req->flags & REQ_F_IMPORT_BUFFER) {
+ unsigned uvec_segs = kmsg->msg.msg_iter.nr_segs;
+ int ret;
+
+ sr->notif->buf_index = req->buf_index;
+ ret = io_import_reg_vec(ITER_SOURCE, &kmsg->msg.msg_iter,
+ sr->notif, &kmsg->vec, uvec_segs,
+ issue_flags);
+ if (unlikely(ret))
+ return ret;
+ req->flags &= ~REQ_F_IMPORT_BUFFER;
+ }
sock = sock_from_file(req->file);
if (unlikely(!sock))
@@ -1196,49 +1544,33 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
return -EOPNOTSUPP;
- if (req_has_async_data(req)) {
- kmsg = req->async_data;
- } else {
- ret = io_sendmsg_copy_hdr(req, &iomsg);
- if (ret)
- return ret;
- kmsg = &iomsg;
- }
-
if (!(req->flags & REQ_F_POLLED) &&
(sr->flags & IORING_RECVSEND_POLL_FIRST))
- return io_setup_async_msg(req, kmsg, issue_flags);
+ return -EAGAIN;
- flags = sr->msg_flags | MSG_ZEROCOPY;
+ flags = sr->msg_flags;
if (issue_flags & IO_URING_F_NONBLOCK)
flags |= MSG_DONTWAIT;
if (flags & MSG_WAITALL)
min_ret = iov_iter_count(&kmsg->msg.msg_iter);
+ kmsg->msg.msg_control_user = sr->msg_control;
kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
- kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
if (unlikely(ret < min_ret)) {
if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
- return io_setup_async_msg(req, kmsg, issue_flags);
+ return -EAGAIN;
if (ret > 0 && io_net_retry(sock, flags)) {
sr->done_io += ret;
- req->flags |= REQ_F_PARTIAL_IO;
- return io_setup_async_msg(req, kmsg, issue_flags);
+ return -EAGAIN;
}
if (ret == -ERESTARTSYS)
ret = -EINTR;
req_set_fail(req);
}
- /* fast path, check for non-NULL to avoid function call */
- if (kmsg->free_iov) {
- kfree(kmsg->free_iov);
- kmsg->free_iov = NULL;
- }
- io_netmsg_recycle(req, issue_flags);
if (ret >= 0)
ret += sr->done_io;
else if (sr->done_io)
@@ -1250,17 +1582,18 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
*/
if (!(issue_flags & IO_URING_F_UNLOCKED)) {
io_notif_flush(sr->notif);
- req->flags &= ~REQ_F_NEED_CLEANUP;
+ sr->notif = NULL;
+ io_req_msg_cleanup(req, 0);
}
io_req_set_res(req, ret, IORING_CQE_F_MORE);
- return IOU_OK;
+ return IOU_COMPLETE;
}
void io_sendrecv_fail(struct io_kiocb *req)
{
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
- if (req->flags & REQ_F_PARTIAL_IO)
+ if (sr->done_io)
req->cqe.res = sr->done_io;
if ((req->flags & REQ_F_NEED_CLEANUP) &&
@@ -1268,10 +1601,12 @@ void io_sendrecv_fail(struct io_kiocb *req)
req->cqe.flags |= IORING_CQE_F_MORE;
}
+#define ACCEPT_FLAGS (IORING_ACCEPT_MULTISHOT | IORING_ACCEPT_DONTWAIT | \
+ IORING_ACCEPT_POLL_FIRST)
+
int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
- unsigned flags;
if (sqe->len || sqe->buf_index)
return -EINVAL;
@@ -1280,15 +1615,15 @@ int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
accept->flags = READ_ONCE(sqe->accept_flags);
accept->nofile = rlimit(RLIMIT_NOFILE);
- flags = READ_ONCE(sqe->ioprio);
- if (flags & ~IORING_ACCEPT_MULTISHOT)
+ accept->iou_flags = READ_ONCE(sqe->ioprio);
+ if (accept->iou_flags & ~ACCEPT_FLAGS)
return -EINVAL;
accept->file_slot = READ_ONCE(sqe->file_index);
if (accept->file_slot) {
if (accept->flags & SOCK_CLOEXEC)
return -EINVAL;
- if (flags & IORING_ACCEPT_MULTISHOT &&
+ if (accept->iou_flags & IORING_ACCEPT_MULTISHOT &&
accept->file_slot != IORING_FILE_INDEX_ALLOC)
return -EINVAL;
}
@@ -1296,48 +1631,49 @@ int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return -EINVAL;
if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
- if (flags & IORING_ACCEPT_MULTISHOT)
+ if (accept->iou_flags & IORING_ACCEPT_MULTISHOT)
req->flags |= REQ_F_APOLL_MULTISHOT;
+ if (accept->iou_flags & IORING_ACCEPT_DONTWAIT)
+ req->flags |= REQ_F_NOWAIT;
return 0;
}
int io_accept(struct io_kiocb *req, unsigned int issue_flags)
{
- struct io_ring_ctx *ctx = req->ctx;
struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
- unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
bool fixed = !!accept->file_slot;
+ struct proto_accept_arg arg = {
+ .flags = force_nonblock ? O_NONBLOCK : 0,
+ };
struct file *file;
+ unsigned cflags;
int ret, fd;
- if (!io_check_multishot(req, issue_flags))
+ if (!(req->flags & REQ_F_POLLED) &&
+ accept->iou_flags & IORING_ACCEPT_POLL_FIRST)
return -EAGAIN;
+
retry:
if (!fixed) {
fd = __get_unused_fd_flags(accept->flags, accept->nofile);
if (unlikely(fd < 0))
return fd;
}
- file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
+ arg.err = 0;
+ arg.is_empty = -1;
+ file = do_accept(req->file, &arg, accept->addr, accept->addr_len,
accept->flags);
if (IS_ERR(file)) {
if (!fixed)
put_unused_fd(fd);
ret = PTR_ERR(file);
- if (ret == -EAGAIN && force_nonblock) {
- /*
- * if it's multishot and polled, we don't need to
- * return EAGAIN to arm the poll infra since it
- * has already been done
- */
- if (issue_flags & IO_URING_F_MULTISHOT)
- ret = IOU_ISSUE_SKIP_COMPLETE;
- return ret;
- }
+ if (ret == -EAGAIN && force_nonblock &&
+ !(accept->iou_flags & IORING_ACCEPT_DONTWAIT))
+ return IOU_RETRY;
+
if (ret == -ERESTARTSYS)
ret = -EINTR;
- req_set_fail(req);
} else if (!fixed) {
fd_install(fd, file);
ret = fd;
@@ -1346,18 +1682,21 @@ retry:
accept->file_slot);
}
- if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
- io_req_set_res(req, ret, 0);
- return IOU_OK;
+ cflags = 0;
+ if (!arg.is_empty)
+ cflags |= IORING_CQE_F_SOCK_NONEMPTY;
+
+ if (ret >= 0 && (req->flags & REQ_F_APOLL_MULTISHOT) &&
+ io_req_post_cqe(req, ret, cflags | IORING_CQE_F_MORE)) {
+ if (cflags & IORING_CQE_F_SOCK_NONEMPTY || arg.is_empty == -1)
+ goto retry;
+ return IOU_RETRY;
}
+ io_req_set_res(req, ret, cflags);
if (ret < 0)
- return ret;
- if (io_aux_cqe(ctx, issue_flags & IO_URING_F_COMPLETE_DEFER,
- req->cqe.user_data, ret, IORING_CQE_F_MORE, true))
- goto retry;
-
- return -ECANCELED;
+ req_set_fail(req);
+ return IOU_COMPLETE;
}
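
Editor's note: with the expanded ACCEPT_FLAGS handling above, a multishot accept can also request POLL_FIRST/DONTWAIT behaviour through sqe->ioprio. A hedged liburing sketch; the extra flag is ORed in by hand since not every liburing version wraps it:

#include <liburing.h>

static int queue_multishot_accept(struct io_uring *ring, int listenfd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -EBUSY;
	/* NULL addr: the peer address is not needed per connection */
	io_uring_prep_multishot_accept(sqe, listenfd, NULL, NULL, 0);
	/* wait for readiness before attempting the first accept */
	sqe->ioprio |= IORING_ACCEPT_POLL_FIRST;
	return io_uring_submit(ring);
}
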
int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
@@ -1411,88 +1750,148 @@ int io_socket(struct io_kiocb *req, unsigned int issue_flags)
sock->file_slot);
}
io_req_set_res(req, ret, 0);
- return IOU_OK;
-}
-
-int io_connect_prep_async(struct io_kiocb *req)
-{
- struct io_async_connect *io = req->async_data;
- struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);
-
- return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
+ return IOU_COMPLETE;
}
int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);
+ struct io_async_msghdr *io;
if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
return -EINVAL;
conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
conn->addr_len = READ_ONCE(sqe->addr2);
- conn->in_progress = false;
- return 0;
+ conn->in_progress = conn->seen_econnaborted = false;
+
+ io = io_msg_alloc_async(req);
+ if (unlikely(!io))
+ return -ENOMEM;
+
+ return move_addr_to_kernel(conn->addr, conn->addr_len, &io->addr);
}
int io_connect(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
- struct io_async_connect __io, *io;
+ struct io_async_msghdr *io = req->async_data;
unsigned file_flags;
int ret;
bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
if (connect->in_progress) {
- struct socket *socket;
-
- ret = -ENOTSOCK;
- socket = sock_from_file(req->file);
- if (socket)
- ret = sock_error(socket->sk);
- goto out;
- }
+ struct poll_table_struct pt = { ._key = EPOLLERR };
- if (req_has_async_data(req)) {
- io = req->async_data;
- } else {
- ret = move_addr_to_kernel(connect->addr,
- connect->addr_len,
- &__io.address);
- if (ret)
- goto out;
- io = &__io;
+ if (vfs_poll(req->file, &pt) & EPOLLERR)
+ goto get_sock_err;
}
file_flags = force_nonblock ? O_NONBLOCK : 0;
- ret = __sys_connect_file(req->file, &io->address,
- connect->addr_len, file_flags);
- if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
+ ret = __sys_connect_file(req->file, &io->addr, connect->addr_len,
+ file_flags);
+ if ((ret == -EAGAIN || ret == -EINPROGRESS || ret == -ECONNABORTED)
+ && force_nonblock) {
if (ret == -EINPROGRESS) {
connect->in_progress = true;
- } else {
- if (req_has_async_data(req))
- return -EAGAIN;
- if (io_alloc_async_data(req)) {
- ret = -ENOMEM;
+ } else if (ret == -ECONNABORTED) {
+ if (connect->seen_econnaborted)
goto out;
- }
- memcpy(req->async_data, &__io, sizeof(__io));
+ connect->seen_econnaborted = true;
}
return -EAGAIN;
}
+ if (connect->in_progress) {
+ /*
+ * At least bluetooth will return -EBADFD on a re-connect
+ * attempt, and it's (supposedly) also valid to get -EISCONN
+ * which means the previous result is good. For both of these,
+ * grab the sock_error() and use that for the completion.
+ */
+ if (ret == -EBADFD || ret == -EISCONN) {
+get_sock_err:
+ ret = sock_error(sock_from_file(req->file)->sk);
+ }
+ }
if (ret == -ERESTARTSYS)
ret = -EINTR;
out:
if (ret < 0)
req_set_fail(req);
+ io_req_msg_cleanup(req, issue_flags);
io_req_set_res(req, ret, 0);
- return IOU_OK;
+ return IOU_COMPLETE;
+}
+
+int io_bind_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+ struct io_bind *bind = io_kiocb_to_cmd(req, struct io_bind);
+ struct sockaddr __user *uaddr;
+ struct io_async_msghdr *io;
+
+ if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
+ return -EINVAL;
+
+ uaddr = u64_to_user_ptr(READ_ONCE(sqe->addr));
+ bind->addr_len = READ_ONCE(sqe->addr2);
+
+ io = io_msg_alloc_async(req);
+ if (unlikely(!io))
+ return -ENOMEM;
+ return move_addr_to_kernel(uaddr, bind->addr_len, &io->addr);
}
-void io_netmsg_cache_free(struct io_cache_entry *entry)
+int io_bind(struct io_kiocb *req, unsigned int issue_flags)
{
- kfree(container_of(entry, struct io_async_msghdr, cache));
+ struct io_bind *bind = io_kiocb_to_cmd(req, struct io_bind);
+ struct io_async_msghdr *io = req->async_data;
+ struct socket *sock;
+ int ret;
+
+ sock = sock_from_file(req->file);
+ if (unlikely(!sock))
+ return -ENOTSOCK;
+
+ ret = __sys_bind_socket(sock, &io->addr, bind->addr_len);
+ if (ret < 0)
+ req_set_fail(req);
+ io_req_set_res(req, ret, 0);
+ return 0;
+}
+
+int io_listen_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+ struct io_listen *listen = io_kiocb_to_cmd(req, struct io_listen);
+
+ if (sqe->addr || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in || sqe->addr2)
+ return -EINVAL;
+
+ listen->backlog = READ_ONCE(sqe->len);
+ return 0;
+}
+
+int io_listen(struct io_kiocb *req, unsigned int issue_flags)
+{
+ struct io_listen *listen = io_kiocb_to_cmd(req, struct io_listen);
+ struct socket *sock;
+ int ret;
+
+ sock = sock_from_file(req->file);
+ if (unlikely(!sock))
+ return -ENOTSOCK;
+
+ ret = __sys_listen_socket(sock, listen->backlog);
+ if (ret < 0)
+ req_set_fail(req);
+ io_req_set_res(req, ret, 0);
+ return 0;
+}
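
Editor's note: io_bind()/io_listen() above give the classic bind(2)/listen(2) pair ring-native opcodes, so a server socket can be set up entirely through the SQ. A hedged sketch, assuming a liburing recent enough to carry io_uring_prep_bind()/io_uring_prep_listen(); otherwise the raw IORING_OP_BIND/IORING_OP_LISTEN SQEs can be filled by hand (addr/addr2 for bind, len for the backlog, matching the prep code above):

#include <liburing.h>
#include <netinet/in.h>
#include <sys/socket.h>

static int setup_listener(struct io_uring *ring, int sockfd,
			  struct sockaddr_in *addr)
{
	struct io_uring_sqe *sqe;

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_bind(sqe, sockfd, (struct sockaddr *)addr, sizeof(*addr));
	sqe->flags |= IOSQE_IO_LINK;		/* listen only if bind succeeded */

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_listen(sqe, sockfd, SOMAXCONN);

	return io_uring_submit_and_wait(ring, 2);
}
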
+
+void io_netmsg_cache_free(const void *entry)
+{
+ struct io_async_msghdr *kmsg = (struct io_async_msghdr *) entry;
+
+ io_vec_free(&kmsg->vec);
+ kfree(kmsg);
}
-#endif
diff --git a/io_uring/net.h b/io_uring/net.h
index 5ffa11bf5d2e..43e5ce5416b7 100644
--- a/io_uring/net.h
+++ b/io_uring/net.h
@@ -2,44 +2,37 @@
#include <linux/net.h>
#include <linux/uio.h>
+#include <linux/io_uring_types.h>
-#include "alloc_cache.h"
-
-#if defined(CONFIG_NET)
struct io_async_msghdr {
- union {
- struct iovec fast_iov[UIO_FASTIOV];
- struct {
- struct iovec fast_iov_one;
- __kernel_size_t controllen;
- int namelen;
- __kernel_size_t payloadlen;
- };
- struct io_cache_entry cache;
- };
- /* points to an allocated iov, if NULL we use fast_iov instead */
- struct iovec *free_iov;
- struct sockaddr __user *uaddr;
- struct msghdr msg;
- struct sockaddr_storage addr;
-};
+#if defined(CONFIG_NET)
+ struct iou_vec vec;
-struct io_async_connect {
- struct sockaddr_storage address;
+ struct_group(clear,
+ int namelen;
+ struct iovec fast_iov;
+ __kernel_size_t controllen;
+ __kernel_size_t payloadlen;
+ struct sockaddr __user *uaddr;
+ struct msghdr msg;
+ struct sockaddr_storage addr;
+ );
+#else
+ struct_group(clear);
+#endif
};
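
Editor's note: the struct_group(clear, ...) wrapper above tags everything past the cached vec as bulk-clearable, so a recycled io_async_msghdr can be reset with one memset while the iovec allocation survives. A hedged illustration of the pattern in isolation, not the actual io_uring call site:

#include <linux/stddef.h>	/* struct_group() */
#include <linux/string.h>

struct demo {
	void *keep;		/* survives recycling, like 'vec' above */
	struct_group(clear,
		int a;
		long b;
	);
};

static void demo_reset(struct demo *d)
{
	/* one memset covers every member inside the group */
	memset(&d->clear, 0, sizeof(d->clear));
}
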
+#if defined(CONFIG_NET)
+
int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_shutdown(struct io_kiocb *req, unsigned int issue_flags);
-int io_sendmsg_prep_async(struct io_kiocb *req);
void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req);
int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags);
int io_send(struct io_kiocb *req, unsigned int issue_flags);
-int io_send_prep_async(struct io_kiocb *req);
-int io_recvmsg_prep_async(struct io_kiocb *req);
int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags);
int io_recv(struct io_kiocb *req, unsigned int issue_flags);
@@ -52,7 +45,6 @@ int io_accept(struct io_kiocb *req, unsigned int issue_flags);
int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_socket(struct io_kiocb *req, unsigned int issue_flags);
-int io_connect_prep_async(struct io_kiocb *req);
int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_connect(struct io_kiocb *req, unsigned int issue_flags);
@@ -61,9 +53,15 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags);
int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
void io_send_zc_cleanup(struct io_kiocb *req);
-void io_netmsg_cache_free(struct io_cache_entry *entry);
+int io_bind_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
+int io_bind(struct io_kiocb *req, unsigned int issue_flags);
+
+int io_listen_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
+int io_listen(struct io_kiocb *req, unsigned int issue_flags);
+
+void io_netmsg_cache_free(const void *entry);
#else
-static inline void io_netmsg_cache_free(struct io_cache_entry *entry)
+static inline void io_netmsg_cache_free(const void *entry)
{
}
#endif
diff --git a/io_uring/nop.c b/io_uring/nop.c
index d956599a3c1b..3caf07878f8a 100644
--- a/io_uring/nop.c
+++ b/io_uring/nop.c
@@ -8,18 +8,84 @@
#include <uapi/linux/io_uring.h>
#include "io_uring.h"
+#include "rsrc.h"
#include "nop.h"
+struct io_nop {
+ /* NOTE: kiocb has the file as the first member, so don't do it here */
+ struct file *file;
+ int result;
+ int fd;
+ unsigned int flags;
+ __u64 extra1;
+ __u64 extra2;
+};
+
+#define NOP_FLAGS (IORING_NOP_INJECT_RESULT | IORING_NOP_FIXED_FILE | \
+ IORING_NOP_FIXED_BUFFER | IORING_NOP_FILE | \
+ IORING_NOP_TW | IORING_NOP_CQE32)
+
int io_nop_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
+ struct io_nop *nop = io_kiocb_to_cmd(req, struct io_nop);
+
+ nop->flags = READ_ONCE(sqe->nop_flags);
+ if (nop->flags & ~NOP_FLAGS)
+ return -EINVAL;
+
+ if (nop->flags & IORING_NOP_INJECT_RESULT)
+ nop->result = READ_ONCE(sqe->len);
+ else
+ nop->result = 0;
+ if (nop->flags & IORING_NOP_FILE)
+ nop->fd = READ_ONCE(sqe->fd);
+ else
+ nop->fd = -1;
+ if (nop->flags & IORING_NOP_FIXED_BUFFER)
+ req->buf_index = READ_ONCE(sqe->buf_index);
+ if (nop->flags & IORING_NOP_CQE32) {
+ struct io_ring_ctx *ctx = req->ctx;
+
+ if (!(ctx->flags & (IORING_SETUP_CQE32|IORING_SETUP_CQE_MIXED)))
+ return -EINVAL;
+ nop->extra1 = READ_ONCE(sqe->off);
+ nop->extra2 = READ_ONCE(sqe->addr);
+ }
return 0;
}
-/*
- * IORING_OP_NOP just posts a completion event, nothing else.
- */
int io_nop(struct io_kiocb *req, unsigned int issue_flags)
{
- io_req_set_res(req, 0, 0);
- return IOU_OK;
+ struct io_nop *nop = io_kiocb_to_cmd(req, struct io_nop);
+ int ret = nop->result;
+
+ if (nop->flags & IORING_NOP_FILE) {
+ if (nop->flags & IORING_NOP_FIXED_FILE) {
+ req->file = io_file_get_fixed(req, nop->fd, issue_flags);
+ req->flags |= REQ_F_FIXED_FILE;
+ } else {
+ req->file = io_file_get_normal(req, nop->fd);
+ }
+ if (!req->file) {
+ ret = -EBADF;
+ goto done;
+ }
+ }
+ if (nop->flags & IORING_NOP_FIXED_BUFFER) {
+ if (!io_find_buf_node(req, issue_flags))
+ ret = -EFAULT;
+ }
+done:
+ if (ret < 0)
+ req_set_fail(req);
+ if (nop->flags & IORING_NOP_CQE32)
+ io_req_set_res32(req, nop->result, 0, nop->extra1, nop->extra2);
+ else
+ io_req_set_res(req, nop->result, 0);
+ if (nop->flags & IORING_NOP_TW) {
+ req->io_task_work.func = io_req_task_complete;
+ io_req_task_work_add(req);
+ return IOU_ISSUE_SKIP_COMPLETE;
+ }
+ return IOU_COMPLETE;
}
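
Editor's note: the expanded NOP above is mostly a test vehicle: it can inject a result, exercise fixed files and buffers, bounce completion through task_work, and post 32-byte CQEs. A hedged sketch of injecting a fake result, assuming the installed uapi headers expose sqe->nop_flags:

#include <liburing.h>

static int queue_nop_with_result(struct io_uring *ring, int fake_res)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -EBUSY;
	io_uring_prep_nop(sqe);
	sqe->nop_flags = IORING_NOP_INJECT_RESULT;
	sqe->len = fake_res;	/* copied into cqe->res by io_nop() above */
	return io_uring_submit(ring);
}
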
diff --git a/io_uring/notif.c b/io_uring/notif.c
index c4bb793ebf0e..f476775ba44b 100644
--- a/io_uring/notif.c
+++ b/io_uring/notif.c
@@ -9,35 +9,42 @@
#include "notif.h"
#include "rsrc.h"
-static void io_notif_complete_tw_ext(struct io_kiocb *notif, bool *locked)
+static const struct ubuf_info_ops io_ubuf_ops;
+
+static void io_notif_tw_complete(struct io_tw_req tw_req, io_tw_token_t tw)
{
+ struct io_kiocb *notif = tw_req.req;
struct io_notif_data *nd = io_notif_to_data(notif);
struct io_ring_ctx *ctx = notif->ctx;
- if (nd->zc_report && (nd->zc_copied || !nd->zc_used))
- notif->cqe.res |= IORING_NOTIF_USAGE_ZC_COPIED;
+ lockdep_assert_held(&ctx->uring_lock);
- if (nd->account_pages && ctx->user) {
- __io_unaccount_mem(ctx->user, nd->account_pages);
- nd->account_pages = 0;
- }
- io_req_task_complete(notif, locked);
-}
+ do {
+ notif = cmd_to_io_kiocb(nd);
-static void io_tx_ubuf_callback(struct sk_buff *skb, struct ubuf_info *uarg,
- bool success)
-{
- struct io_notif_data *nd = container_of(uarg, struct io_notif_data, uarg);
- struct io_kiocb *notif = cmd_to_io_kiocb(nd);
+ if (WARN_ON_ONCE(ctx != notif->ctx))
+ return;
+ lockdep_assert(refcount_read(&nd->uarg.refcnt) == 0);
+
+ if (unlikely(nd->zc_report) && (nd->zc_copied || !nd->zc_used))
+ notif->cqe.res |= IORING_NOTIF_USAGE_ZC_COPIED;
+
+ if (nd->account_pages && notif->ctx->user) {
+ __io_unaccount_mem(notif->ctx->user, nd->account_pages);
+ nd->account_pages = 0;
+ }
- if (refcount_dec_and_test(&uarg->refcnt))
- io_req_task_work_add(notif);
+ nd = nd->next;
+ io_req_task_complete((struct io_tw_req){notif}, tw);
+ } while (nd);
}
-static void io_tx_ubuf_callback_ext(struct sk_buff *skb, struct ubuf_info *uarg,
- bool success)
+void io_tx_ubuf_complete(struct sk_buff *skb, struct ubuf_info *uarg,
+ bool success)
{
struct io_notif_data *nd = container_of(uarg, struct io_notif_data, uarg);
+ struct io_kiocb *notif = cmd_to_io_kiocb(nd);
+ unsigned tw_flags;
if (nd->zc_report) {
if (success && !nd->zc_used && skb)
@@ -45,43 +52,89 @@ static void io_tx_ubuf_callback_ext(struct sk_buff *skb, struct ubuf_info *uarg,
else if (!success && !nd->zc_copied)
WRITE_ONCE(nd->zc_copied, true);
}
- io_tx_ubuf_callback(skb, uarg, success);
+
+ if (!refcount_dec_and_test(&uarg->refcnt))
+ return;
+
+ if (nd->head != nd) {
+ io_tx_ubuf_complete(skb, &nd->head->uarg, success);
+ return;
+ }
+
+ tw_flags = nd->next ? 0 : IOU_F_TWQ_LAZY_WAKE;
+ notif->io_task_work.func = io_notif_tw_complete;
+ __io_req_task_work_add(notif, tw_flags);
}
-void io_notif_set_extended(struct io_kiocb *notif)
+static int io_link_skb(struct sk_buff *skb, struct ubuf_info *uarg)
{
- struct io_notif_data *nd = io_notif_to_data(notif);
+ struct io_notif_data *nd, *prev_nd;
+ struct io_kiocb *prev_notif, *notif;
+ struct ubuf_info *prev_uarg = skb_zcopy(skb);
+
+ nd = container_of(uarg, struct io_notif_data, uarg);
+ notif = cmd_to_io_kiocb(nd);
- if (nd->uarg.callback != io_tx_ubuf_callback_ext) {
- nd->account_pages = 0;
- nd->zc_report = false;
- nd->zc_used = false;
- nd->zc_copied = false;
- nd->uarg.callback = io_tx_ubuf_callback_ext;
- notif->io_task_work.func = io_notif_complete_tw_ext;
+ if (!prev_uarg) {
+ net_zcopy_get(&nd->uarg);
+ skb_zcopy_init(skb, &nd->uarg);
+ return 0;
}
+ /* handle it separately as we can't link a notif to itself */
+ if (unlikely(prev_uarg == &nd->uarg))
+ return 0;
+ /* we can't join two links together, just request a fresh skb */
+ if (unlikely(nd->head != nd || nd->next))
+ return -EEXIST;
+ /* don't mix zc providers */
+ if (unlikely(prev_uarg->ops != &io_ubuf_ops))
+ return -EEXIST;
+
+ prev_nd = container_of(prev_uarg, struct io_notif_data, uarg);
+ prev_notif = cmd_to_io_kiocb(prev_nd);
+
+ /* make sure all notifications can be finished in the same task_work */
+ if (unlikely(notif->ctx != prev_notif->ctx ||
+ notif->tctx != prev_notif->tctx))
+ return -EEXIST;
+
+ nd->head = prev_nd->head;
+ nd->next = prev_nd->next;
+ prev_nd->next = nd;
+ net_zcopy_get(&nd->head->uarg);
+ return 0;
}
+static const struct ubuf_info_ops io_ubuf_ops = {
+ .complete = io_tx_ubuf_complete,
+ .link_skb = io_link_skb,
+};
+
struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx)
__must_hold(&ctx->uring_lock)
{
struct io_kiocb *notif;
struct io_notif_data *nd;
- if (unlikely(!io_alloc_req_refill(ctx)))
+ if (unlikely(!io_alloc_req(ctx, &notif)))
return NULL;
- notif = io_alloc_req(ctx);
+ notif->ctx = ctx;
notif->opcode = IORING_OP_NOP;
notif->flags = 0;
notif->file = NULL;
- notif->task = current;
+ notif->tctx = current->io_uring;
io_get_task_refs(1);
- notif->rsrc_node = NULL;
- notif->io_task_work.func = io_req_task_complete;
+ notif->file_node = NULL;
+ notif->buf_node = NULL;
nd = io_notif_to_data(notif);
- nd->uarg.flags = SKBFL_ZEROCOPY_FRAG | SKBFL_DONT_ORPHAN;
- nd->uarg.callback = io_tx_ubuf_callback;
+ nd->zc_report = false;
+ nd->account_pages = 0;
+ nd->next = NULL;
+ nd->head = nd;
+
+ nd->uarg.flags = IO_NOTIF_UBUF_FLAGS;
+ nd->uarg.ops = &io_ubuf_ops;
refcount_set(&nd->uarg.refcnt, 1);
return notif;
}
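
Editor's note: tying the notification path back to the IORING_SEND_ZC_REPORT_USAGE handling earlier in net.c: when that flag is set on a zero-copy send, the notification CQE reports whether the kernel had to fall back to copying. A hedged sketch of checking it from userspace:

#include <liburing.h>

/* Call on each CQE reaped after an IORING_OP_SEND_ZC submitted with
 * zc_flags = IORING_SEND_ZC_REPORT_USAGE. */
static void check_zc_usage(const struct io_uring_cqe *cqe)
{
	if (!(cqe->flags & IORING_CQE_F_NOTIF))
		return;		/* not the notification CQE */
	if (cqe->res & IORING_NOTIF_USAGE_ZC_COPIED) {
		/* data was copied at least once; zerocopy gave no benefit */
	}
}
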
diff --git a/io_uring/notif.h b/io_uring/notif.h
index c88c800cd89d..f3589cfef4a9 100644
--- a/io_uring/notif.h
+++ b/io_uring/notif.h
@@ -7,19 +7,25 @@
#include "rsrc.h"
+#define IO_NOTIF_UBUF_FLAGS (SKBFL_ZEROCOPY_FRAG | SKBFL_DONT_ORPHAN)
#define IO_NOTIF_SPLICE_BATCH 32
struct io_notif_data {
struct file *file;
struct ubuf_info uarg;
- unsigned long account_pages;
+
+ struct io_notif_data *next;
+ struct io_notif_data *head;
+
+ unsigned account_pages;
bool zc_report;
bool zc_used;
bool zc_copied;
};
struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx);
-void io_notif_set_extended(struct io_kiocb *notif);
+void io_tx_ubuf_complete(struct sk_buff *skb, struct ubuf_info *uarg,
+ bool success);
static inline struct io_notif_data *io_notif_to_data(struct io_kiocb *notif)
{
@@ -31,9 +37,7 @@ static inline void io_notif_flush(struct io_kiocb *notif)
{
struct io_notif_data *nd = io_notif_to_data(notif);
- /* drop slot's master ref */
- if (refcount_dec_and_test(&nd->uarg.refcnt))
- io_req_task_work_add(notif);
+ io_tx_ubuf_complete(NULL, &nd->uarg, true);
}
static inline int io_notif_account_mem(struct io_kiocb *notif, unsigned len)
diff --git a/io_uring/opdef.c b/io_uring/opdef.c
index 3aa0d65c50e3..df52d760240e 100644
--- a/io_uring/opdef.c
+++ b/io_uring/opdef.c
@@ -7,6 +7,7 @@
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/io_uring.h>
+#include <linux/io_uring/cmd.h>
#include "io_uring.h"
#include "opdef.h"
@@ -33,6 +34,10 @@
#include "poll.h"
#include "cancel.h"
#include "rw.h"
+#include "waitid.h"
+#include "futex.h"
+#include "truncate.h"
+#include "zcrx.h"
static int io_no_issue(struct io_kiocb *req, unsigned int issue_flags)
{
@@ -46,11 +51,10 @@ static __maybe_unused int io_eopnotsupp_prep(struct io_kiocb *kiocb,
return -EOPNOTSUPP;
}
-const struct io_op_def io_op_defs[] = {
+const struct io_issue_def io_issue_defs[] = {
[IORING_OP_NOP] = {
.audit_skip = 1,
.iopoll = 1,
- .name = "NOP",
.prep = io_nop_prep,
.issue = io_nop,
},
@@ -64,13 +68,10 @@ const struct io_op_def io_op_defs[] = {
.ioprio = 1,
.iopoll = 1,
.iopoll_queue = 1,
+ .vectored = 1,
.async_size = sizeof(struct io_async_rw),
- .name = "READV",
- .prep = io_prep_rw,
+ .prep = io_prep_readv,
.issue = io_read,
- .prep_async = io_readv_prep_async,
- .cleanup = io_readv_writev_cleanup,
- .fail = io_rw_fail,
},
[IORING_OP_WRITEV] = {
.needs_file = 1,
@@ -82,18 +83,14 @@ const struct io_op_def io_op_defs[] = {
.ioprio = 1,
.iopoll = 1,
.iopoll_queue = 1,
+ .vectored = 1,
.async_size = sizeof(struct io_async_rw),
- .name = "WRITEV",
- .prep = io_prep_rw,
+ .prep = io_prep_writev,
.issue = io_write,
- .prep_async = io_writev_prep_async,
- .cleanup = io_readv_writev_cleanup,
- .fail = io_rw_fail,
},
[IORING_OP_FSYNC] = {
.needs_file = 1,
.audit_skip = 1,
- .name = "FSYNC",
.prep = io_fsync_prep,
.issue = io_fsync,
},
@@ -107,10 +104,8 @@ const struct io_op_def io_op_defs[] = {
.iopoll = 1,
.iopoll_queue = 1,
.async_size = sizeof(struct io_async_rw),
- .name = "READ_FIXED",
- .prep = io_prep_rw,
- .issue = io_read,
- .fail = io_rw_fail,
+ .prep = io_prep_read_fixed,
+ .issue = io_read_fixed,
},
[IORING_OP_WRITE_FIXED] = {
.needs_file = 1,
@@ -123,29 +118,24 @@ const struct io_op_def io_op_defs[] = {
.iopoll = 1,
.iopoll_queue = 1,
.async_size = sizeof(struct io_async_rw),
- .name = "WRITE_FIXED",
- .prep = io_prep_rw,
- .issue = io_write,
- .fail = io_rw_fail,
+ .prep = io_prep_write_fixed,
+ .issue = io_write_fixed,
},
[IORING_OP_POLL_ADD] = {
.needs_file = 1,
.unbound_nonreg_file = 1,
.audit_skip = 1,
- .name = "POLL_ADD",
.prep = io_poll_add_prep,
.issue = io_poll_add,
},
[IORING_OP_POLL_REMOVE] = {
.audit_skip = 1,
- .name = "POLL_REMOVE",
.prep = io_poll_remove_prep,
.issue = io_poll_remove,
},
[IORING_OP_SYNC_FILE_RANGE] = {
.needs_file = 1,
.audit_skip = 1,
- .name = "SYNC_FILE_RANGE",
.prep = io_sfr_prep,
.issue = io_sync_file_range,
},
@@ -154,15 +144,10 @@ const struct io_op_def io_op_defs[] = {
.unbound_nonreg_file = 1,
.pollout = 1,
.ioprio = 1,
- .manual_alloc = 1,
- .name = "SENDMSG",
#if defined(CONFIG_NET)
.async_size = sizeof(struct io_async_msghdr),
.prep = io_sendmsg_prep,
.issue = io_sendmsg,
- .prep_async = io_sendmsg_prep_async,
- .cleanup = io_sendmsg_recvmsg_cleanup,
- .fail = io_sendrecv_fail,
#else
.prep = io_eopnotsupp_prep,
#endif
@@ -173,15 +158,10 @@ const struct io_op_def io_op_defs[] = {
.pollin = 1,
.buffer_select = 1,
.ioprio = 1,
- .manual_alloc = 1,
- .name = "RECVMSG",
#if defined(CONFIG_NET)
.async_size = sizeof(struct io_async_msghdr),
.prep = io_recvmsg_prep,
.issue = io_recvmsg,
- .prep_async = io_recvmsg_prep_async,
- .cleanup = io_sendmsg_recvmsg_cleanup,
- .fail = io_sendrecv_fail,
#else
.prep = io_eopnotsupp_prep,
#endif
@@ -189,14 +169,12 @@ const struct io_op_def io_op_defs[] = {
[IORING_OP_TIMEOUT] = {
.audit_skip = 1,
.async_size = sizeof(struct io_timeout_data),
- .name = "TIMEOUT",
.prep = io_timeout_prep,
.issue = io_timeout,
},
[IORING_OP_TIMEOUT_REMOVE] = {
/* used by timeout updates' prep() */
.audit_skip = 1,
- .name = "TIMEOUT_REMOVE",
.prep = io_timeout_remove_prep,
.issue = io_timeout_remove,
},
@@ -206,7 +184,6 @@ const struct io_op_def io_op_defs[] = {
.pollin = 1,
.poll_exclusive = 1,
.ioprio = 1, /* used for flags */
- .name = "ACCEPT",
#if defined(CONFIG_NET)
.prep = io_accept_prep,
.issue = io_accept,
@@ -216,14 +193,12 @@ const struct io_op_def io_op_defs[] = {
},
[IORING_OP_ASYNC_CANCEL] = {
.audit_skip = 1,
- .name = "ASYNC_CANCEL",
.prep = io_async_cancel_prep,
.issue = io_async_cancel,
},
[IORING_OP_LINK_TIMEOUT] = {
.audit_skip = 1,
.async_size = sizeof(struct io_timeout_data),
- .name = "LINK_TIMEOUT",
.prep = io_link_timeout_prep,
.issue = io_no_issue,
},
@@ -231,46 +206,38 @@ const struct io_op_def io_op_defs[] = {
.needs_file = 1,
.unbound_nonreg_file = 1,
.pollout = 1,
- .name = "CONNECT",
#if defined(CONFIG_NET)
- .async_size = sizeof(struct io_async_connect),
+ .async_size = sizeof(struct io_async_msghdr),
.prep = io_connect_prep,
.issue = io_connect,
- .prep_async = io_connect_prep_async,
#else
.prep = io_eopnotsupp_prep,
#endif
},
[IORING_OP_FALLOCATE] = {
.needs_file = 1,
- .name = "FALLOCATE",
+ .hash_reg_file = 1,
.prep = io_fallocate_prep,
.issue = io_fallocate,
},
[IORING_OP_OPENAT] = {
- .name = "OPENAT",
.prep = io_openat_prep,
.issue = io_openat,
- .cleanup = io_open_cleanup,
},
[IORING_OP_CLOSE] = {
- .name = "CLOSE",
.prep = io_close_prep,
.issue = io_close,
},
[IORING_OP_FILES_UPDATE] = {
.audit_skip = 1,
.iopoll = 1,
- .name = "FILES_UPDATE",
.prep = io_files_update_prep,
.issue = io_files_update,
},
[IORING_OP_STATX] = {
.audit_skip = 1,
- .name = "STATX",
.prep = io_statx_prep,
.issue = io_statx,
- .cleanup = io_statx_cleanup,
},
[IORING_OP_READ] = {
.needs_file = 1,
@@ -283,10 +250,8 @@ const struct io_op_def io_op_defs[] = {
.iopoll = 1,
.iopoll_queue = 1,
.async_size = sizeof(struct io_async_rw),
- .name = "READ",
- .prep = io_prep_rw,
+ .prep = io_prep_read,
.issue = io_read,
- .fail = io_rw_fail,
},
[IORING_OP_WRITE] = {
.needs_file = 1,
@@ -299,20 +264,17 @@ const struct io_op_def io_op_defs[] = {
.iopoll = 1,
.iopoll_queue = 1,
.async_size = sizeof(struct io_async_rw),
- .name = "WRITE",
- .prep = io_prep_rw,
+ .prep = io_prep_write,
.issue = io_write,
- .fail = io_rw_fail,
},
[IORING_OP_FADVISE] = {
.needs_file = 1,
.audit_skip = 1,
- .name = "FADVISE",
.prep = io_fadvise_prep,
.issue = io_fadvise,
},
[IORING_OP_MADVISE] = {
- .name = "MADVISE",
+ .audit_skip = 1,
.prep = io_madvise_prep,
.issue = io_madvise,
},
@@ -322,14 +284,11 @@ const struct io_op_def io_op_defs[] = {
.pollout = 1,
.audit_skip = 1,
.ioprio = 1,
- .manual_alloc = 1,
- .name = "SEND",
+ .buffer_select = 1,
#if defined(CONFIG_NET)
.async_size = sizeof(struct io_async_msghdr),
.prep = io_sendmsg_prep,
.issue = io_send,
- .fail = io_sendrecv_fail,
- .prep_async = io_send_prep_async,
#else
.prep = io_eopnotsupp_prep,
#endif
@@ -341,25 +300,21 @@ const struct io_op_def io_op_defs[] = {
.buffer_select = 1,
.audit_skip = 1,
.ioprio = 1,
- .name = "RECV",
#if defined(CONFIG_NET)
+ .async_size = sizeof(struct io_async_msghdr),
.prep = io_recvmsg_prep,
.issue = io_recv,
- .fail = io_sendrecv_fail,
#else
.prep = io_eopnotsupp_prep,
#endif
},
[IORING_OP_OPENAT2] = {
- .name = "OPENAT2",
.prep = io_openat2_prep,
.issue = io_openat2,
- .cleanup = io_open_cleanup,
},
[IORING_OP_EPOLL_CTL] = {
.unbound_nonreg_file = 1,
.audit_skip = 1,
- .name = "EPOLL",
#if defined(CONFIG_EPOLL)
.prep = io_epoll_ctl_prep,
.issue = io_epoll_ctl,
@@ -372,36 +327,31 @@ const struct io_op_def io_op_defs[] = {
.hash_reg_file = 1,
.unbound_nonreg_file = 1,
.audit_skip = 1,
- .name = "SPLICE",
.prep = io_splice_prep,
.issue = io_splice,
},
[IORING_OP_PROVIDE_BUFFERS] = {
.audit_skip = 1,
.iopoll = 1,
- .name = "PROVIDE_BUFFERS",
.prep = io_provide_buffers_prep,
- .issue = io_provide_buffers,
+ .issue = io_manage_buffers_legacy,
},
[IORING_OP_REMOVE_BUFFERS] = {
.audit_skip = 1,
.iopoll = 1,
- .name = "REMOVE_BUFFERS",
.prep = io_remove_buffers_prep,
- .issue = io_remove_buffers,
+ .issue = io_manage_buffers_legacy,
},
[IORING_OP_TEE] = {
.needs_file = 1,
.hash_reg_file = 1,
.unbound_nonreg_file = 1,
.audit_skip = 1,
- .name = "TEE",
.prep = io_tee_prep,
.issue = io_tee,
},
[IORING_OP_SHUTDOWN] = {
.needs_file = 1,
- .name = "SHUTDOWN",
#if defined(CONFIG_NET)
.prep = io_shutdown_prep,
.issue = io_shutdown,
@@ -410,72 +360,51 @@ const struct io_op_def io_op_defs[] = {
#endif
},
[IORING_OP_RENAMEAT] = {
- .name = "RENAMEAT",
.prep = io_renameat_prep,
.issue = io_renameat,
- .cleanup = io_renameat_cleanup,
},
[IORING_OP_UNLINKAT] = {
- .name = "UNLINKAT",
.prep = io_unlinkat_prep,
.issue = io_unlinkat,
- .cleanup = io_unlinkat_cleanup,
},
[IORING_OP_MKDIRAT] = {
- .name = "MKDIRAT",
.prep = io_mkdirat_prep,
.issue = io_mkdirat,
- .cleanup = io_mkdirat_cleanup,
},
[IORING_OP_SYMLINKAT] = {
- .name = "SYMLINKAT",
.prep = io_symlinkat_prep,
.issue = io_symlinkat,
- .cleanup = io_link_cleanup,
},
[IORING_OP_LINKAT] = {
- .name = "LINKAT",
.prep = io_linkat_prep,
.issue = io_linkat,
- .cleanup = io_link_cleanup,
},
[IORING_OP_MSG_RING] = {
.needs_file = 1,
.iopoll = 1,
- .name = "MSG_RING",
.prep = io_msg_ring_prep,
.issue = io_msg_ring,
- .cleanup = io_msg_ring_cleanup,
},
[IORING_OP_FSETXATTR] = {
.needs_file = 1,
- .name = "FSETXATTR",
.prep = io_fsetxattr_prep,
.issue = io_fsetxattr,
- .cleanup = io_xattr_cleanup,
},
[IORING_OP_SETXATTR] = {
- .name = "SETXATTR",
.prep = io_setxattr_prep,
.issue = io_setxattr,
- .cleanup = io_xattr_cleanup,
},
[IORING_OP_FGETXATTR] = {
.needs_file = 1,
- .name = "FGETXATTR",
.prep = io_fgetxattr_prep,
.issue = io_fgetxattr,
- .cleanup = io_xattr_cleanup,
},
[IORING_OP_GETXATTR] = {
- .name = "GETXATTR",
.prep = io_getxattr_prep,
.issue = io_getxattr,
- .cleanup = io_xattr_cleanup,
},
[IORING_OP_SOCKET] = {
.audit_skip = 1,
- .name = "SOCKET",
#if defined(CONFIG_NET)
.prep = io_socket_prep,
.issue = io_socket,
@@ -484,72 +413,472 @@ const struct io_op_def io_op_defs[] = {
#endif
},
[IORING_OP_URING_CMD] = {
+ .buffer_select = 1,
.needs_file = 1,
.plug = 1,
- .name = "URING_CMD",
.iopoll = 1,
.iopoll_queue = 1,
- .async_size = uring_cmd_pdu_size(1),
+ .async_size = sizeof(struct io_async_cmd),
.prep = io_uring_cmd_prep,
.issue = io_uring_cmd,
- .prep_async = io_uring_cmd_prep_async,
},
[IORING_OP_SEND_ZC] = {
- .name = "SEND_ZC",
.needs_file = 1,
.unbound_nonreg_file = 1,
.pollout = 1,
.audit_skip = 1,
.ioprio = 1,
- .manual_alloc = 1,
#if defined(CONFIG_NET)
.async_size = sizeof(struct io_async_msghdr),
.prep = io_send_zc_prep,
.issue = io_send_zc,
- .prep_async = io_send_prep_async,
- .cleanup = io_send_zc_cleanup,
- .fail = io_sendrecv_fail,
#else
.prep = io_eopnotsupp_prep,
#endif
},
[IORING_OP_SENDMSG_ZC] = {
- .name = "SENDMSG_ZC",
.needs_file = 1,
.unbound_nonreg_file = 1,
.pollout = 1,
.ioprio = 1,
- .manual_alloc = 1,
#if defined(CONFIG_NET)
.async_size = sizeof(struct io_async_msghdr),
.prep = io_send_zc_prep,
.issue = io_sendmsg_zc,
- .prep_async = io_sendmsg_prep_async,
- .cleanup = io_send_zc_cleanup,
- .fail = io_sendrecv_fail,
#else
.prep = io_eopnotsupp_prep,
#endif
},
+ [IORING_OP_READ_MULTISHOT] = {
+ .needs_file = 1,
+ .unbound_nonreg_file = 1,
+ .pollin = 1,
+ .buffer_select = 1,
+ .audit_skip = 1,
+ .async_size = sizeof(struct io_async_rw),
+ .prep = io_read_mshot_prep,
+ .issue = io_read_mshot,
+ },
+ [IORING_OP_WAITID] = {
+ .async_size = sizeof(struct io_waitid_async),
+ .prep = io_waitid_prep,
+ .issue = io_waitid,
+ },
+ [IORING_OP_FUTEX_WAIT] = {
+#if defined(CONFIG_FUTEX)
+ .prep = io_futex_prep,
+ .issue = io_futex_wait,
+#else
+ .prep = io_eopnotsupp_prep,
+#endif
+ },
+ [IORING_OP_FUTEX_WAKE] = {
+#if defined(CONFIG_FUTEX)
+ .prep = io_futex_prep,
+ .issue = io_futex_wake,
+#else
+ .prep = io_eopnotsupp_prep,
+#endif
+ },
+ [IORING_OP_FUTEX_WAITV] = {
+#if defined(CONFIG_FUTEX)
+ .prep = io_futexv_prep,
+ .issue = io_futexv_wait,
+#else
+ .prep = io_eopnotsupp_prep,
+#endif
+ },
+ [IORING_OP_FIXED_FD_INSTALL] = {
+ .needs_file = 1,
+ .prep = io_install_fixed_fd_prep,
+ .issue = io_install_fixed_fd,
+ },
+ [IORING_OP_FTRUNCATE] = {
+ .needs_file = 1,
+ .hash_reg_file = 1,
+ .prep = io_ftruncate_prep,
+ .issue = io_ftruncate,
+ },
+ [IORING_OP_BIND] = {
+#if defined(CONFIG_NET)
+ .needs_file = 1,
+ .prep = io_bind_prep,
+ .issue = io_bind,
+ .async_size = sizeof(struct io_async_msghdr),
+#else
+ .prep = io_eopnotsupp_prep,
+#endif
+ },
+ [IORING_OP_LISTEN] = {
+#if defined(CONFIG_NET)
+ .needs_file = 1,
+ .prep = io_listen_prep,
+ .issue = io_listen,
+ .async_size = sizeof(struct io_async_msghdr),
+#else
+ .prep = io_eopnotsupp_prep,
+#endif
+ },
+ [IORING_OP_RECV_ZC] = {
+ .needs_file = 1,
+ .unbound_nonreg_file = 1,
+ .pollin = 1,
+ .ioprio = 1,
+#if defined(CONFIG_NET)
+ .prep = io_recvzc_prep,
+ .issue = io_recvzc,
+#else
+ .prep = io_eopnotsupp_prep,
+#endif
+ },
+ [IORING_OP_EPOLL_WAIT] = {
+ .needs_file = 1,
+ .audit_skip = 1,
+ .pollin = 1,
+#if defined(CONFIG_EPOLL)
+ .prep = io_epoll_wait_prep,
+ .issue = io_epoll_wait,
+#else
+ .prep = io_eopnotsupp_prep,
+#endif
+ },
+ [IORING_OP_READV_FIXED] = {
+ .needs_file = 1,
+ .unbound_nonreg_file = 1,
+ .pollin = 1,
+ .plug = 1,
+ .audit_skip = 1,
+ .ioprio = 1,
+ .iopoll = 1,
+ .iopoll_queue = 1,
+ .vectored = 1,
+ .async_size = sizeof(struct io_async_rw),
+ .prep = io_prep_readv_fixed,
+ .issue = io_read,
+ },
+ [IORING_OP_WRITEV_FIXED] = {
+ .needs_file = 1,
+ .hash_reg_file = 1,
+ .unbound_nonreg_file = 1,
+ .pollout = 1,
+ .plug = 1,
+ .audit_skip = 1,
+ .ioprio = 1,
+ .iopoll = 1,
+ .iopoll_queue = 1,
+ .vectored = 1,
+ .async_size = sizeof(struct io_async_rw),
+ .prep = io_prep_writev_fixed,
+ .issue = io_write,
+ },
+ [IORING_OP_PIPE] = {
+ .prep = io_pipe_prep,
+ .issue = io_pipe,
+ },
+ [IORING_OP_NOP128] = {
+ .audit_skip = 1,
+ .iopoll = 1,
+ .is_128 = 1,
+ .prep = io_nop_prep,
+ .issue = io_nop,
+ },
+ [IORING_OP_URING_CMD128] = {
+ .buffer_select = 1,
+ .needs_file = 1,
+ .plug = 1,
+ .iopoll = 1,
+ .iopoll_queue = 1,
+ .is_128 = 1,
+ .async_size = sizeof(struct io_async_cmd),
+ .prep = io_uring_cmd_prep,
+ .issue = io_uring_cmd,
+ },
+};
+
+const struct io_cold_def io_cold_defs[] = {
+ [IORING_OP_NOP] = {
+ .name = "NOP",
+ },
+ [IORING_OP_READV] = {
+ .name = "READV",
+ .cleanup = io_readv_writev_cleanup,
+ .fail = io_rw_fail,
+ },
+ [IORING_OP_WRITEV] = {
+ .name = "WRITEV",
+ .cleanup = io_readv_writev_cleanup,
+ .fail = io_rw_fail,
+ },
+ [IORING_OP_FSYNC] = {
+ .name = "FSYNC",
+ },
+ [IORING_OP_READ_FIXED] = {
+ .name = "READ_FIXED",
+ .cleanup = io_readv_writev_cleanup,
+ .fail = io_rw_fail,
+ },
+ [IORING_OP_WRITE_FIXED] = {
+ .name = "WRITE_FIXED",
+ .cleanup = io_readv_writev_cleanup,
+ .fail = io_rw_fail,
+ },
+ [IORING_OP_POLL_ADD] = {
+ .name = "POLL_ADD",
+ },
+ [IORING_OP_POLL_REMOVE] = {
+ .name = "POLL_REMOVE",
+ },
+ [IORING_OP_SYNC_FILE_RANGE] = {
+ .name = "SYNC_FILE_RANGE",
+ },
+ [IORING_OP_SENDMSG] = {
+ .name = "SENDMSG",
+#if defined(CONFIG_NET)
+ .cleanup = io_sendmsg_recvmsg_cleanup,
+ .fail = io_sendrecv_fail,
+#endif
+ },
+ [IORING_OP_RECVMSG] = {
+ .name = "RECVMSG",
+#if defined(CONFIG_NET)
+ .cleanup = io_sendmsg_recvmsg_cleanup,
+ .fail = io_sendrecv_fail,
+#endif
+ },
+ [IORING_OP_TIMEOUT] = {
+ .name = "TIMEOUT",
+ },
+ [IORING_OP_TIMEOUT_REMOVE] = {
+ .name = "TIMEOUT_REMOVE",
+ },
+ [IORING_OP_ACCEPT] = {
+ .name = "ACCEPT",
+ },
+ [IORING_OP_ASYNC_CANCEL] = {
+ .name = "ASYNC_CANCEL",
+ },
+ [IORING_OP_LINK_TIMEOUT] = {
+ .name = "LINK_TIMEOUT",
+ },
+ [IORING_OP_CONNECT] = {
+ .name = "CONNECT",
+ },
+ [IORING_OP_FALLOCATE] = {
+ .name = "FALLOCATE",
+ },
+ [IORING_OP_OPENAT] = {
+ .name = "OPENAT",
+ .cleanup = io_open_cleanup,
+ },
+ [IORING_OP_CLOSE] = {
+ .name = "CLOSE",
+ },
+ [IORING_OP_FILES_UPDATE] = {
+ .name = "FILES_UPDATE",
+ },
+ [IORING_OP_STATX] = {
+ .name = "STATX",
+ .cleanup = io_statx_cleanup,
+ },
+ [IORING_OP_READ] = {
+ .name = "READ",
+ .cleanup = io_readv_writev_cleanup,
+ .fail = io_rw_fail,
+ },
+ [IORING_OP_WRITE] = {
+ .name = "WRITE",
+ .cleanup = io_readv_writev_cleanup,
+ .fail = io_rw_fail,
+ },
+ [IORING_OP_FADVISE] = {
+ .name = "FADVISE",
+ },
+ [IORING_OP_MADVISE] = {
+ .name = "MADVISE",
+ },
+ [IORING_OP_SEND] = {
+ .name = "SEND",
+#if defined(CONFIG_NET)
+ .cleanup = io_sendmsg_recvmsg_cleanup,
+ .fail = io_sendrecv_fail,
+#endif
+ },
+ [IORING_OP_RECV] = {
+ .name = "RECV",
+#if defined(CONFIG_NET)
+ .cleanup = io_sendmsg_recvmsg_cleanup,
+ .fail = io_sendrecv_fail,
+#endif
+ },
+ [IORING_OP_OPENAT2] = {
+ .name = "OPENAT2",
+ .cleanup = io_open_cleanup,
+ },
+ [IORING_OP_EPOLL_CTL] = {
+ .name = "EPOLL",
+ },
+ [IORING_OP_SPLICE] = {
+ .name = "SPLICE",
+ .cleanup = io_splice_cleanup,
+ },
+ [IORING_OP_PROVIDE_BUFFERS] = {
+ .name = "PROVIDE_BUFFERS",
+ },
+ [IORING_OP_REMOVE_BUFFERS] = {
+ .name = "REMOVE_BUFFERS",
+ },
+ [IORING_OP_TEE] = {
+ .name = "TEE",
+ .cleanup = io_splice_cleanup,
+ },
+ [IORING_OP_SHUTDOWN] = {
+ .name = "SHUTDOWN",
+ },
+ [IORING_OP_RENAMEAT] = {
+ .name = "RENAMEAT",
+ .cleanup = io_renameat_cleanup,
+ },
+ [IORING_OP_UNLINKAT] = {
+ .name = "UNLINKAT",
+ .cleanup = io_unlinkat_cleanup,
+ },
+ [IORING_OP_MKDIRAT] = {
+ .name = "MKDIRAT",
+ .cleanup = io_mkdirat_cleanup,
+ },
+ [IORING_OP_SYMLINKAT] = {
+ .name = "SYMLINKAT",
+ .cleanup = io_link_cleanup,
+ },
+ [IORING_OP_LINKAT] = {
+ .name = "LINKAT",
+ .cleanup = io_link_cleanup,
+ },
+ [IORING_OP_MSG_RING] = {
+ .name = "MSG_RING",
+ .cleanup = io_msg_ring_cleanup,
+ },
+ [IORING_OP_FSETXATTR] = {
+ .name = "FSETXATTR",
+ .cleanup = io_xattr_cleanup,
+ },
+ [IORING_OP_SETXATTR] = {
+ .name = "SETXATTR",
+ .cleanup = io_xattr_cleanup,
+ },
+ [IORING_OP_FGETXATTR] = {
+ .name = "FGETXATTR",
+ .cleanup = io_xattr_cleanup,
+ },
+ [IORING_OP_GETXATTR] = {
+ .name = "GETXATTR",
+ .cleanup = io_xattr_cleanup,
+ },
+ [IORING_OP_SOCKET] = {
+ .name = "SOCKET",
+ },
+ [IORING_OP_URING_CMD] = {
+ .name = "URING_CMD",
+ .sqe_copy = io_uring_cmd_sqe_copy,
+ .cleanup = io_uring_cmd_cleanup,
+ },
+ [IORING_OP_SEND_ZC] = {
+ .name = "SEND_ZC",
+#if defined(CONFIG_NET)
+ .cleanup = io_send_zc_cleanup,
+ .fail = io_sendrecv_fail,
+#endif
+ },
+ [IORING_OP_SENDMSG_ZC] = {
+ .name = "SENDMSG_ZC",
+#if defined(CONFIG_NET)
+ .cleanup = io_send_zc_cleanup,
+ .fail = io_sendrecv_fail,
+#endif
+ },
+ [IORING_OP_READ_MULTISHOT] = {
+ .name = "READ_MULTISHOT",
+ .cleanup = io_readv_writev_cleanup,
+ },
+ [IORING_OP_WAITID] = {
+ .name = "WAITID",
+ },
+ [IORING_OP_FUTEX_WAIT] = {
+ .name = "FUTEX_WAIT",
+ },
+ [IORING_OP_FUTEX_WAKE] = {
+ .name = "FUTEX_WAKE",
+ },
+ [IORING_OP_FUTEX_WAITV] = {
+ .name = "FUTEX_WAITV",
+ },
+ [IORING_OP_FIXED_FD_INSTALL] = {
+ .name = "FIXED_FD_INSTALL",
+ },
+ [IORING_OP_FTRUNCATE] = {
+ .name = "FTRUNCATE",
+ },
+ [IORING_OP_BIND] = {
+ .name = "BIND",
+ },
+ [IORING_OP_LISTEN] = {
+ .name = "LISTEN",
+ },
+ [IORING_OP_RECV_ZC] = {
+ .name = "RECV_ZC",
+ },
+ [IORING_OP_EPOLL_WAIT] = {
+ .name = "EPOLL_WAIT",
+ },
+ [IORING_OP_READV_FIXED] = {
+ .name = "READV_FIXED",
+ .cleanup = io_readv_writev_cleanup,
+ .fail = io_rw_fail,
+ },
+ [IORING_OP_WRITEV_FIXED] = {
+ .name = "WRITEV_FIXED",
+ .cleanup = io_readv_writev_cleanup,
+ .fail = io_rw_fail,
+ },
+ [IORING_OP_PIPE] = {
+ .name = "PIPE",
+ },
+ [IORING_OP_NOP128] = {
+ .name = "NOP128",
+ },
+ [IORING_OP_URING_CMD128] = {
+ .name = "URING_CMD128",
+ .sqe_copy = io_uring_cmd_sqe_copy,
+ .cleanup = io_uring_cmd_cleanup,
+ },
};
const char *io_uring_get_opcode(u8 opcode)
{
if (opcode < IORING_OP_LAST)
- return io_op_defs[opcode].name;
+ return io_cold_defs[opcode].name;
return "INVALID";
}
+bool io_uring_op_supported(u8 opcode)
+{
+ if (opcode < IORING_OP_LAST &&
+ io_issue_defs[opcode].prep != io_eopnotsupp_prep)
+ return true;
+ return false;
+}
+
void __init io_uring_optable_init(void)
{
int i;
- BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
+ BUILD_BUG_ON(ARRAY_SIZE(io_cold_defs) != IORING_OP_LAST);
+ BUILD_BUG_ON(ARRAY_SIZE(io_issue_defs) != IORING_OP_LAST);
- for (i = 0; i < ARRAY_SIZE(io_op_defs); i++) {
- BUG_ON(!io_op_defs[i].prep);
- if (io_op_defs[i].prep != io_eopnotsupp_prep)
- BUG_ON(!io_op_defs[i].issue);
- WARN_ON_ONCE(!io_op_defs[i].name);
+ for (i = 0; i < ARRAY_SIZE(io_issue_defs); i++) {
+ BUG_ON(!io_issue_defs[i].prep);
+ if (io_issue_defs[i].prep != io_eopnotsupp_prep)
+ BUG_ON(!io_issue_defs[i].issue);
+ WARN_ON_ONCE(!io_cold_defs[i].name);
}
}
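
The new io_uring_op_supported() helper is what the probe path consults when reporting per-opcode support, so userspace can keep using the probe interface to detect newly added opcodes. A minimal sketch, assuming liburing's io_uring_get_probe()/io_uring_opcode_supported() helpers and a uapi header new enough to define IORING_OP_PIPE:

#include <stdio.h>
#include <liburing.h>

int main(void)
{
	/* allocates a throwaway ring, runs IORING_REGISTER_PROBE, frees the ring */
	struct io_uring_probe *probe = io_uring_get_probe();

	if (!probe)
		return 1;
	printf("IORING_OP_PIPE supported: %d\n",
	       io_uring_opcode_supported(probe, IORING_OP_PIPE));
	io_uring_free_probe(probe);
	return 0;
}
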
diff --git a/io_uring/opdef.h b/io_uring/opdef.h
index df7e13d9bfba..aa37846880ff 100644
--- a/io_uring/opdef.h
+++ b/io_uring/opdef.h
@@ -2,11 +2,17 @@
#ifndef IOU_OP_DEF_H
#define IOU_OP_DEF_H
-struct io_op_def {
+struct io_issue_def {
/* needs req->file assigned */
unsigned needs_file : 1;
/* should block plug */
unsigned plug : 1;
+ /* supports ioprio */
+ unsigned ioprio : 1;
+ /* supports iopoll */
+ unsigned iopoll : 1;
+ /* op supports buffer selection */
+ unsigned buffer_select : 1;
/* hash wq insertion if file is a regular file */
unsigned hash_reg_file : 1;
/* unbound wq insertion if file is a non-regular file */
@@ -15,33 +21,34 @@ struct io_op_def {
unsigned pollin : 1;
unsigned pollout : 1;
unsigned poll_exclusive : 1;
- /* op supports buffer selection */
- unsigned buffer_select : 1;
- /* opcode is not supported by this kernel */
- unsigned not_supported : 1;
/* skip auditing */
unsigned audit_skip : 1;
- /* supports ioprio */
- unsigned ioprio : 1;
- /* supports iopoll */
- unsigned iopoll : 1;
/* have to be put into the iopoll list */
unsigned iopoll_queue : 1;
- /* opcode specific path will handle ->async_data allocation if needed */
- unsigned manual_alloc : 1;
+ /* vectored opcode, set if 1) vectored, and 2) handler needs to know */
+ unsigned vectored : 1;
+ /* set to 1 if this opcode uses 128b sqes in a mixed sq */
+ unsigned is_128 : 1;
+
/* size of async data needed, if any */
unsigned short async_size;
+ int (*issue)(struct io_kiocb *, unsigned int);
+ int (*prep)(struct io_kiocb *, const struct io_uring_sqe *);
+};
+
+struct io_cold_def {
const char *name;
- int (*prep)(struct io_kiocb *, const struct io_uring_sqe *);
- int (*issue)(struct io_kiocb *, unsigned int);
- int (*prep_async)(struct io_kiocb *);
+ void (*sqe_copy)(struct io_kiocb *);
void (*cleanup)(struct io_kiocb *);
void (*fail)(struct io_kiocb *);
};
-extern const struct io_op_def io_op_defs[];
+extern const struct io_issue_def io_issue_defs[];
+extern const struct io_cold_def io_cold_defs[];
+
+bool io_uring_op_supported(u8 opcode);
void io_uring_optable_init(void);
#endif
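
The split into io_issue_def and io_cold_def keeps only the fields the submission and issue fast path reads in the hot table, while names and the rarely used sqe_copy/cleanup/fail callbacks move to a separate, colder table. A rough sketch of how the two tables are consumed (simplified; not the actual io_uring.c code):

static int issue_sketch(struct io_kiocb *req, unsigned int issue_flags)
{
	/* hot path: only io_issue_defs[] is touched */
	const struct io_issue_def *def = &io_issue_defs[req->opcode];
	int ret = def->issue(req, issue_flags);

	/* slow path: name/cleanup/fail live in io_cold_defs[] */
	if (ret < 0 && io_cold_defs[req->opcode].fail)
		io_cold_defs[req->opcode].fail(req);
	return ret;
}
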
diff --git a/io_uring/openclose.c b/io_uring/openclose.c
index 67178e4bb282..bfeb91b31bba 100644
--- a/io_uring/openclose.c
+++ b/io_uring/openclose.c
@@ -6,12 +6,15 @@
#include <linux/fdtable.h>
#include <linux/fsnotify.h>
#include <linux/namei.h>
+#include <linux/pipe_fs_i.h>
+#include <linux/watch_queue.h>
#include <linux/io_uring.h>
#include <uapi/linux/io_uring.h>
#include "../fs/internal.h"
+#include "filetable.h"
#include "io_uring.h"
#include "rsrc.h"
#include "openclose.h"
@@ -31,6 +34,22 @@ struct io_close {
u32 file_slot;
};
+struct io_fixed_install {
+ struct file *file;
+ unsigned int o_flags;
+};
+
+static bool io_openat_force_async(struct io_open *open)
+{
+ /*
+	 * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE opens,
+	 * they'll always return -EAGAIN. Note that we test for __O_TMPFILE
+	 * because O_TMPFILE includes O_DIRECTORY, which isn't a flag we need
+	 * to force async for.
+ */
+ return open->how.flags & (O_TRUNC | O_CREAT | __O_TMPFILE);
+}
+
static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
@@ -61,6 +80,8 @@ static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
open->nofile = rlimit(RLIMIT_NOFILE);
req->flags |= REQ_F_NEED_CLEANUP;
+ if (io_openat_force_async(open))
+ req->flags |= REQ_F_FORCE_ASYNC;
return 0;
}
@@ -108,12 +129,7 @@ int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
nonblock_set = op.open_flag & O_NONBLOCK;
resolve_nonblock = open->how.resolve & RESOLVE_CACHED;
if (issue_flags & IO_URING_F_NONBLOCK) {
- /*
- * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open,
- * it'll always -EAGAIN
- */
- if (open->how.flags & (O_TRUNC | O_CREAT | O_TMPFILE))
- return -EAGAIN;
+ WARN_ON_ONCE(io_openat_force_async(open));
op.lookup_flags |= LOOKUP_CACHED;
op.open_flag |= O_NONBLOCK;
}
@@ -144,7 +160,6 @@ int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set)
file->f_flags &= ~O_NONBLOCK;
- fsnotify_open(file);
if (!fixed)
fd_install(ret, file);
@@ -157,7 +172,7 @@ err:
if (ret < 0)
req_set_fail(req);
io_req_set_res(req, ret, 0);
- return IOU_OK;
+ return IOU_COMPLETE;
}
int io_openat(struct io_kiocb *req, unsigned int issue_flags)
@@ -213,7 +228,6 @@ int io_close(struct io_kiocb *req, unsigned int issue_flags)
{
struct files_struct *files = current->files;
struct io_close *close = io_kiocb_to_cmd(req, struct io_close);
- struct fdtable *fdt;
struct file *file;
int ret = -EBADF;
@@ -223,13 +237,7 @@ int io_close(struct io_kiocb *req, unsigned int issue_flags)
}
spin_lock(&files->file_lock);
- fdt = files_fdtable(files);
- if (close->fd >= fdt->max_fds) {
- spin_unlock(&files->file_lock);
- goto err;
- }
- file = rcu_dereference_protected(fdt->fd[close->fd],
- lockdep_is_held(&files->file_lock));
+ file = files_lookup_fd_locked(files, close->fd);
if (!file || io_is_uring_fops(file)) {
spin_unlock(&files->file_lock);
goto err;
@@ -241,7 +249,7 @@ int io_close(struct io_kiocb *req, unsigned int issue_flags)
return -EAGAIN;
}
- file = __close_fd_get_file(close->fd);
+ file = file_close_fd_locked(files, close->fd);
spin_unlock(&files->file_lock);
if (!file)
goto err;
@@ -252,5 +260,177 @@ err:
if (ret < 0)
req_set_fail(req);
io_req_set_res(req, ret, 0);
- return IOU_OK;
+ return IOU_COMPLETE;
+}
+
+int io_install_fixed_fd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+ struct io_fixed_install *ifi;
+ unsigned int flags;
+
+ if (sqe->off || sqe->addr || sqe->len || sqe->buf_index ||
+ sqe->splice_fd_in || sqe->addr3)
+ return -EINVAL;
+
+ /* must be a fixed file */
+ if (!(req->flags & REQ_F_FIXED_FILE))
+ return -EBADF;
+
+ flags = READ_ONCE(sqe->install_fd_flags);
+ if (flags & ~IORING_FIXED_FD_NO_CLOEXEC)
+ return -EINVAL;
+
+ /* ensure the task's creds are used when installing/receiving fds */
+ if (req->flags & REQ_F_CREDS)
+ return -EPERM;
+
+ /* default to O_CLOEXEC, disable if IORING_FIXED_FD_NO_CLOEXEC is set */
+ ifi = io_kiocb_to_cmd(req, struct io_fixed_install);
+ ifi->o_flags = O_CLOEXEC;
+ if (flags & IORING_FIXED_FD_NO_CLOEXEC)
+ ifi->o_flags = 0;
+
+ return 0;
+}
+
+int io_install_fixed_fd(struct io_kiocb *req, unsigned int issue_flags)
+{
+ struct io_fixed_install *ifi;
+ int ret;
+
+ ifi = io_kiocb_to_cmd(req, struct io_fixed_install);
+ ret = receive_fd(req->file, NULL, ifi->o_flags);
+ if (ret < 0)
+ req_set_fail(req);
+ io_req_set_res(req, ret, 0);
+ return IOU_COMPLETE;
+}
+
+struct io_pipe {
+ struct file *file;
+ int __user *fds;
+ int flags;
+ int file_slot;
+ unsigned long nofile;
+};
+
+int io_pipe_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+ struct io_pipe *p = io_kiocb_to_cmd(req, struct io_pipe);
+
+ if (sqe->fd || sqe->off || sqe->addr3)
+ return -EINVAL;
+
+ p->fds = u64_to_user_ptr(READ_ONCE(sqe->addr));
+ p->flags = READ_ONCE(sqe->pipe_flags);
+ if (p->flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT | O_NOTIFICATION_PIPE))
+ return -EINVAL;
+
+ p->file_slot = READ_ONCE(sqe->file_index);
+ p->nofile = rlimit(RLIMIT_NOFILE);
+ return 0;
+}
+
+static int io_pipe_fixed(struct io_kiocb *req, struct file **files,
+ unsigned int issue_flags)
+{
+ struct io_pipe *p = io_kiocb_to_cmd(req, struct io_pipe);
+ struct io_ring_ctx *ctx = req->ctx;
+ int ret, fds[2] = { -1, -1 };
+ int slot = p->file_slot;
+
+ if (p->flags & O_CLOEXEC)
+ return -EINVAL;
+
+ io_ring_submit_lock(ctx, issue_flags);
+
+ ret = __io_fixed_fd_install(ctx, files[0], slot);
+ if (ret < 0)
+ goto err;
+ fds[0] = ret;
+ files[0] = NULL;
+
+ /*
+	 * If a specific slot is given, the next one will be used for
+	 * the write side.
+ */
+ if (slot != IORING_FILE_INDEX_ALLOC)
+ slot++;
+
+ ret = __io_fixed_fd_install(ctx, files[1], slot);
+ if (ret < 0)
+ goto err;
+ fds[1] = ret;
+ files[1] = NULL;
+
+ io_ring_submit_unlock(ctx, issue_flags);
+
+ if (!copy_to_user(p->fds, fds, sizeof(fds)))
+ return 0;
+
+ ret = -EFAULT;
+ io_ring_submit_lock(ctx, issue_flags);
+err:
+ if (fds[0] != -1)
+ io_fixed_fd_remove(ctx, fds[0]);
+ if (fds[1] != -1)
+ io_fixed_fd_remove(ctx, fds[1]);
+ io_ring_submit_unlock(ctx, issue_flags);
+ return ret;
+}
+
+static int io_pipe_fd(struct io_kiocb *req, struct file **files)
+{
+ struct io_pipe *p = io_kiocb_to_cmd(req, struct io_pipe);
+ int ret, fds[2] = { -1, -1 };
+
+ ret = __get_unused_fd_flags(p->flags, p->nofile);
+ if (ret < 0)
+ goto err;
+ fds[0] = ret;
+
+ ret = __get_unused_fd_flags(p->flags, p->nofile);
+ if (ret < 0)
+ goto err;
+ fds[1] = ret;
+
+ if (!copy_to_user(p->fds, fds, sizeof(fds))) {
+ fd_install(fds[0], files[0]);
+ fd_install(fds[1], files[1]);
+ return 0;
+ }
+ ret = -EFAULT;
+err:
+ if (fds[0] != -1)
+ put_unused_fd(fds[0]);
+ if (fds[1] != -1)
+ put_unused_fd(fds[1]);
+ return ret;
+}
+
+int io_pipe(struct io_kiocb *req, unsigned int issue_flags)
+{
+ struct io_pipe *p = io_kiocb_to_cmd(req, struct io_pipe);
+ struct file *files[2];
+ int ret;
+
+ ret = create_pipe_files(files, p->flags);
+ if (ret)
+ return ret;
+
+ if (!!p->file_slot)
+ ret = io_pipe_fixed(req, files, issue_flags);
+ else
+ ret = io_pipe_fd(req, files);
+
+ io_req_set_res(req, ret, 0);
+ if (!ret)
+ return IOU_COMPLETE;
+
+ req_set_fail(req);
+ if (files[0])
+ fput(files[0]);
+ if (files[1])
+ fput(files[1]);
+ return ret;
}
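
IORING_OP_PIPE hands the two pipe fds back by copying them to a user pointer rather than through the CQE. A hedged userspace sketch of the non-fixed case, filling the raw SQE fields that io_pipe_prep() above reads (addr for the fds array, pipe_flags for the open flags); any dedicated liburing prep helper is not assumed here:

#include <string.h>
#include <errno.h>
#include <liburing.h>

static int submit_pipe(struct io_uring *ring, int fds[2])
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int ret;

	if (!sqe)
		return -EBUSY;
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_PIPE;
	sqe->addr = (unsigned long)fds;	/* kernel copies both fds here */
	/* sqe->pipe_flags = O_CLOEXEC;	   optional, validated by prep */

	io_uring_submit(ring);
	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret)
		return ret;
	ret = cqe->res;			/* 0 on success, -errno on failure */
	io_uring_cqe_seen(ring, cqe);
	return ret;
}
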
diff --git a/io_uring/openclose.h b/io_uring/openclose.h
index 4b1c28d3a66c..4ca2a9935abc 100644
--- a/io_uring/openclose.h
+++ b/io_uring/openclose.h
@@ -12,3 +12,9 @@ int io_openat2(struct io_kiocb *req, unsigned int issue_flags);
int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_close(struct io_kiocb *req, unsigned int issue_flags);
+
+int io_pipe_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
+int io_pipe(struct io_kiocb *req, unsigned int issue_flags);
+
+int io_install_fixed_fd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
+int io_install_fixed_fd(struct io_kiocb *req, unsigned int issue_flags);
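
For IORING_OP_FIXED_FD_INSTALL the source must be a registered (fixed) file, and the CQE result is the newly installed regular fd. A short sketch assuming liburing's io_uring_prep_fixed_fd_install() helper (which sets IOSQE_FIXED_FILE for you); the helper name comes from liburing, not from this kernel patch:

#include <liburing.h>

static int install_fixed(struct io_uring *ring, int fixed_slot)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int fd;

	if (!sqe)
		return -1;
	io_uring_prep_fixed_fd_install(sqe, fixed_slot, 0);
	io_uring_submit(ring);
	if (io_uring_wait_cqe(ring, &cqe))
		return -1;
	fd = cqe->res;			/* new regular fd, or -errno */
	io_uring_cqe_seen(ring, cqe);
	return fd;
}
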
diff --git a/io_uring/poll.c b/io_uring/poll.c
index 2ac1366adbd7..aac4b3b881fb 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -14,7 +14,9 @@
#include <uapi/linux/io_uring.h>
#include "io_uring.h"
+#include "alloc_cache.h"
#include "refs.h"
+#include "napi.h"
#include "opdef.h"
#include "kbuf.h"
#include "poll.h"
@@ -51,6 +53,9 @@ struct io_poll_table {
#define IO_WQE_F_DOUBLE 1
+static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
+ void *key);
+
static inline struct io_kiocb *wqe_to_req(struct wait_queue_entry *wqe)
{
unsigned long priv = (unsigned long)wqe->private;
@@ -117,62 +122,39 @@ static void io_poll_req_insert(struct io_kiocb *req)
{
struct io_hash_table *table = &req->ctx->cancel_table;
u32 index = hash_long(req->cqe.user_data, table->hash_bits);
- struct io_hash_bucket *hb = &table->hbs[index];
-
- spin_lock(&hb->lock);
- hlist_add_head(&req->hash_node, &hb->list);
- spin_unlock(&hb->lock);
-}
-
-static void io_poll_req_delete(struct io_kiocb *req, struct io_ring_ctx *ctx)
-{
- struct io_hash_table *table = &req->ctx->cancel_table;
- u32 index = hash_long(req->cqe.user_data, table->hash_bits);
- spinlock_t *lock = &table->hbs[index].lock;
-
- spin_lock(lock);
- hash_del(&req->hash_node);
- spin_unlock(lock);
-}
-
-static void io_poll_req_insert_locked(struct io_kiocb *req)
-{
- struct io_hash_table *table = &req->ctx->cancel_table_locked;
- u32 index = hash_long(req->cqe.user_data, table->hash_bits);
lockdep_assert_held(&req->ctx->uring_lock);
hlist_add_head(&req->hash_node, &table->hbs[index].list);
}
-static void io_poll_tw_hash_eject(struct io_kiocb *req, bool *locked)
-{
- struct io_ring_ctx *ctx = req->ctx;
-
- if (req->flags & REQ_F_HASH_LOCKED) {
- /*
- * ->cancel_table_locked is protected by ->uring_lock in
- * contrast to per bucket spinlocks. Likely, tctx_task_work()
- * already grabbed the mutex for us, but there is a chance it
- * failed.
- */
- io_tw_lock(ctx, locked);
- hash_del(&req->hash_node);
- req->flags &= ~REQ_F_HASH_LOCKED;
- } else {
- io_poll_req_delete(req, ctx);
- }
-}
-
-static void io_init_poll_iocb(struct io_poll *poll, __poll_t events,
- wait_queue_func_t wake_func)
+static void io_init_poll_iocb(struct io_poll *poll, __poll_t events)
{
poll->head = NULL;
#define IO_POLL_UNMASK (EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
/* mask in events that we always want/need */
poll->events = events | IO_POLL_UNMASK;
INIT_LIST_HEAD(&poll->wait.entry);
- init_waitqueue_func_entry(&poll->wait, wake_func);
+ init_waitqueue_func_entry(&poll->wait, io_poll_wake);
+}
+
+static void io_poll_remove_waitq(struct io_poll *poll)
+{
+ /*
+	 * If the waitqueue is being freed early but someone already holds
+	 * ownership over it, we have to tear down the request as best we can.
+ * That means immediately removing the request from its waitqueue and
+ * preventing all further accesses to the waitqueue via the request.
+ */
+ list_del_init(&poll->wait.entry);
+
+ /*
+	 * Careful: this *must* be the last step. As soon as poll->head is
+	 * NULL'ed out, the request can be completed and freed, because
+	 * io_poll_remove_entry() will no longer need to take the waitqueue
+	 * lock.
+ */
+ smp_store_release(&poll->head, NULL);
}
static inline void io_poll_remove_entry(struct io_poll *poll)
@@ -181,8 +163,7 @@ static inline void io_poll_remove_entry(struct io_poll *poll)
if (head) {
spin_lock_irq(&head->lock);
- list_del_init(&poll->wait.entry);
- poll->head = NULL;
+ io_poll_remove_waitq(poll);
spin_unlock_irq(&head->lock);
}
}
@@ -224,8 +205,29 @@ enum {
IOU_POLL_NO_ACTION = 1,
IOU_POLL_REMOVE_POLL_USE_RES = 2,
IOU_POLL_REISSUE = 3,
+ IOU_POLL_REQUEUE = 4,
};
+static void __io_poll_execute(struct io_kiocb *req, int mask)
+{
+ unsigned flags = 0;
+
+ io_req_set_res(req, mask, 0);
+ req->io_task_work.func = io_poll_task_func;
+
+ trace_io_uring_task_add(req, mask);
+
+ if (!(req->flags & REQ_F_POLL_NO_LAZY))
+ flags = IOU_F_TWQ_LAZY_WAKE;
+ __io_req_task_work_add(req, flags);
+}
+
+static inline void io_poll_execute(struct io_kiocb *req, int res)
+{
+ if (io_poll_get_ownership(req))
+ __io_poll_execute(req, res);
+}
+
/*
* All poll tw should go through this. Checks for poll events, manages
* references, does rewait, etc.
@@ -236,12 +238,11 @@ enum {
* req->cqe.res. IOU_POLL_REMOVE_POLL_USE_RES indicates to remove multishot
* poll and that the result is stored in req->cqe.
*/
-static int io_poll_check_events(struct io_kiocb *req, bool *locked)
+static int io_poll_check_events(struct io_kiocb *req, io_tw_token_t tw)
{
int v;
- /* req->task == current here, checking PF_EXITING is safe */
- if (unlikely(req->task->flags & PF_EXITING))
+ if (unlikely(tw.cancel))
return -ECANCELED;
do {
@@ -298,16 +299,18 @@ static int io_poll_check_events(struct io_kiocb *req, bool *locked)
__poll_t mask = mangle_poll(req->cqe.res &
req->apoll_events);
- if (!io_aux_cqe(req->ctx, *locked, req->cqe.user_data,
- mask, IORING_CQE_F_MORE, false)) {
+ if (!io_req_post_cqe(req, mask, IORING_CQE_F_MORE)) {
io_req_set_res(req, mask, 0);
return IOU_POLL_REMOVE_POLL_USE_RES;
}
} else {
- int ret = io_poll_issue(req, locked);
- if (ret == IOU_STOP_MULTISHOT)
+ int ret = io_poll_issue(req, tw);
+
+ if (ret == IOU_COMPLETE)
return IOU_POLL_REMOVE_POLL_USE_RES;
- if (ret < 0)
+ else if (ret == IOU_REQUEUE)
+ return IOU_POLL_REQUEUE;
+ if (ret != IOU_RETRY && ret < 0)
return ret;
}
@@ -318,21 +321,28 @@ static int io_poll_check_events(struct io_kiocb *req, bool *locked)
* Release all references, retry if someone tried to restart
* task_work while we were executing it.
*/
- } while (atomic_sub_return(v & IO_POLL_REF_MASK, &req->poll_refs) &
- IO_POLL_REF_MASK);
+ v &= IO_POLL_REF_MASK;
+ } while (atomic_sub_return(v, &req->poll_refs) & IO_POLL_REF_MASK);
+ io_napi_add(req);
return IOU_POLL_NO_ACTION;
}
-static void io_poll_task_func(struct io_kiocb *req, bool *locked)
+void io_poll_task_func(struct io_tw_req tw_req, io_tw_token_t tw)
{
+ struct io_kiocb *req = tw_req.req;
int ret;
- ret = io_poll_check_events(req, locked);
- if (ret == IOU_POLL_NO_ACTION)
+ ret = io_poll_check_events(req, tw);
+ if (ret == IOU_POLL_NO_ACTION) {
+ return;
+ } else if (ret == IOU_POLL_REQUEUE) {
+ __io_poll_execute(req, 0);
return;
+ }
io_poll_remove_entries(req);
- io_poll_tw_hash_eject(req, locked);
+ /* task_work always has ->uring_lock held */
+ hash_del(&req->hash_node);
if (req->opcode == IORING_OP_POLL_ADD) {
if (ret == IOU_POLL_DONE) {
@@ -341,7 +351,7 @@ static void io_poll_task_func(struct io_kiocb *req, bool *locked)
poll = io_kiocb_to_cmd(req, struct io_poll);
req->cqe.res = mangle_poll(req->cqe.res & poll->events);
} else if (ret == IOU_POLL_REISSUE) {
- io_req_task_submit(req, locked);
+ io_req_task_submit(tw_req, tw);
return;
} else if (ret != IOU_POLL_REMOVE_POLL_USE_RES) {
req->cqe.res = ret;
@@ -349,34 +359,19 @@ static void io_poll_task_func(struct io_kiocb *req, bool *locked)
}
io_req_set_res(req, req->cqe.res, 0);
- io_req_task_complete(req, locked);
+ io_req_task_complete(tw_req, tw);
} else {
- io_tw_lock(req->ctx, locked);
+ io_tw_lock(req->ctx, tw);
if (ret == IOU_POLL_REMOVE_POLL_USE_RES)
- io_req_task_complete(req, locked);
+ io_req_task_complete(tw_req, tw);
else if (ret == IOU_POLL_DONE || ret == IOU_POLL_REISSUE)
- io_req_task_submit(req, locked);
+ io_req_task_submit(tw_req, tw);
else
io_req_defer_failed(req, ret);
}
}
-static void __io_poll_execute(struct io_kiocb *req, int mask)
-{
- io_req_set_res(req, mask, 0);
- req->io_task_work.func = io_poll_task_func;
-
- trace_io_uring_task_add(req, mask);
- io_req_task_work_add(req);
-}
-
-static inline void io_poll_execute(struct io_kiocb *req, int res)
-{
- if (io_poll_get_ownership(req))
- __io_poll_execute(req, res);
-}
-
static void io_poll_cancel_req(struct io_kiocb *req)
{
io_poll_mark_cancelled(req);
@@ -391,23 +386,7 @@ static __cold int io_pollfree_wake(struct io_kiocb *req, struct io_poll *poll)
io_poll_mark_cancelled(req);
/* we have to kick tw in case it's not already */
io_poll_execute(req, 0);
-
- /*
- * If the waitqueue is being freed early but someone is already
- * holds ownership over it, we have to tear down the request as
- * best we can. That means immediately removing the request from
- * its waitqueue and preventing all further accesses to the
- * waitqueue via the request.
- */
- list_del_init(&poll->wait.entry);
-
- /*
- * Careful: this *must* be the last step, since as soon
- * as req->head is NULL'ed out, the request can be
- * completed and freed, since aio_poll_complete_work()
- * will no longer need to take the waitqueue lock.
- */
- smp_store_release(&poll->head, NULL);
+ io_poll_remove_waitq(poll);
return 1;
}
@@ -436,8 +415,7 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
/* optional, saves extra locking for removal in tw handler */
if (mask && poll->events & EPOLLONESHOT) {
- list_del_init(&poll->wait.entry);
- poll->head = NULL;
+ io_poll_remove_waitq(poll);
if (wqe_is_double(wait))
req->flags &= ~REQ_F_DOUBLE_POLL;
else
@@ -508,7 +486,7 @@ static void __io_queue_proc(struct io_poll *poll, struct io_poll_table *pt,
/* mark as double wq entry */
wqe_private |= IO_WQE_F_DOUBLE;
- io_init_poll_iocb(poll, first->events, first->wait.func);
+ io_init_poll_iocb(poll, first->events);
if (!io_poll_double_prepare(req)) {
/* the request is completing, just back off */
kfree(poll);
@@ -524,10 +502,11 @@ static void __io_queue_proc(struct io_poll *poll, struct io_poll_table *pt,
poll->head = head;
poll->wait.private = (void *) wqe_private;
- if (poll->events & EPOLLEXCLUSIVE)
+ if (poll->events & EPOLLEXCLUSIVE) {
add_wait_queue_exclusive(head, &poll->wait);
- else
+ } else {
add_wait_queue(head, &poll->wait);
+ }
}
static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
@@ -546,12 +525,13 @@ static bool io_poll_can_finish_inline(struct io_kiocb *req,
return pt->owning || io_poll_get_ownership(req);
}
-static void io_poll_add_hash(struct io_kiocb *req)
+static void io_poll_add_hash(struct io_kiocb *req, unsigned int issue_flags)
{
- if (req->flags & REQ_F_HASH_LOCKED)
- io_poll_req_insert_locked(req);
- else
- io_poll_req_insert(req);
+ struct io_ring_ctx *ctx = req->ctx;
+
+ io_ring_submit_lock(ctx, issue_flags);
+ io_poll_req_insert(req);
+ io_ring_submit_unlock(ctx, issue_flags);
}
/*
@@ -565,11 +545,8 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
struct io_poll_table *ipt, __poll_t mask,
unsigned issue_flags)
{
- struct io_ring_ctx *ctx = req->ctx;
-
INIT_HLIST_NODE(&req->hash_node);
- req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
- io_init_poll_iocb(poll, mask, io_poll_wake);
+ io_init_poll_iocb(poll, mask);
poll->file = req->file;
req->apoll_events = poll->events;
@@ -591,9 +568,15 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
ipt->owning = issue_flags & IO_URING_F_UNLOCKED;
atomic_set(&req->poll_refs, (int)ipt->owning);
- /* io-wq doesn't hold uring_lock */
- if (issue_flags & IO_URING_F_UNLOCKED)
- req->flags &= ~REQ_F_HASH_LOCKED;
+ /*
+	 * Exclusive waits may only wake a limited number of entries
+	 * rather than all of them; this may interfere with lazy
+	 * wake if someone does wait(events > 1). Ensure we don't do
+	 * lazy wake for those, as we need to process each one as they
+	 * come in.
+ */
+ if (poll->events & EPOLLEXCLUSIVE)
+ req->flags |= REQ_F_POLL_NO_LAZY;
mask = vfs_poll(req->file, &ipt->pt) & poll->events;
@@ -613,7 +596,7 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
if (mask &&
((poll->events & (EPOLLET|EPOLLONESHOT)) == (EPOLLET|EPOLLONESHOT))) {
if (!io_poll_can_finish_inline(req, ipt)) {
- io_poll_add_hash(req);
+ io_poll_add_hash(req, issue_flags);
return 0;
}
io_poll_remove_entries(req);
@@ -622,13 +605,14 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
return 1;
}
- io_poll_add_hash(req);
+ io_poll_add_hash(req, issue_flags);
if (mask && (poll->events & EPOLLET) &&
io_poll_can_finish_inline(req, ipt)) {
__io_poll_execute(req, mask);
return 0;
}
+ io_napi_add(req);
if (ipt->owning) {
/*
@@ -650,54 +634,74 @@ static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
__io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
}
+/*
+ * We can't reliably detect loops where a poll trigger is repeatedly
+ * followed by a failing issue. But rather than fail these immediately,
+ * allow a certain number of retries before we give up. Given that this
+ * condition should _rarely_ trigger even once, we should be fine with a
+ * larger value.
+ */
+#define APOLL_MAX_RETRY 128
+
static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
unsigned issue_flags)
{
struct io_ring_ctx *ctx = req->ctx;
- struct io_cache_entry *entry;
struct async_poll *apoll;
if (req->flags & REQ_F_POLLED) {
apoll = req->apoll;
kfree(apoll->double_poll);
- } else if (!(issue_flags & IO_URING_F_UNLOCKED)) {
- entry = io_alloc_cache_get(&ctx->apoll_cache);
- if (entry == NULL)
- goto alloc_apoll;
- apoll = container_of(entry, struct async_poll, cache);
} else {
-alloc_apoll:
- apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
- if (unlikely(!apoll))
+ if (!(issue_flags & IO_URING_F_UNLOCKED))
+ apoll = io_cache_alloc(&ctx->apoll_cache, GFP_ATOMIC);
+ else
+ apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
+ if (!apoll)
return NULL;
+ apoll->poll.retries = APOLL_MAX_RETRY;
}
apoll->double_poll = NULL;
req->apoll = apoll;
+ if (unlikely(!--apoll->poll.retries))
+ return NULL;
return apoll;
}
-int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
+int io_arm_apoll(struct io_kiocb *req, unsigned issue_flags, __poll_t mask)
{
- const struct io_op_def *def = &io_op_defs[req->opcode];
struct async_poll *apoll;
struct io_poll_table ipt;
- __poll_t mask = POLLPRI | POLLERR | EPOLLET;
int ret;
- /*
- * apoll requests already grab the mutex to complete in the tw handler,
- * so removal from the mutex-backed hash is free, use it by default.
- */
- req->flags |= REQ_F_HASH_LOCKED;
+ mask |= EPOLLET;
+ if (!io_file_can_poll(req))
+ return IO_APOLL_ABORTED;
+ if (!(req->flags & REQ_F_APOLL_MULTISHOT))
+ mask |= EPOLLONESHOT;
- if (!def->pollin && !def->pollout)
+ apoll = io_req_alloc_apoll(req, issue_flags);
+ if (!apoll)
return IO_APOLL_ABORTED;
- if (!file_can_poll(req->file))
+ req->flags &= ~(REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL);
+ req->flags |= REQ_F_POLLED;
+ ipt.pt._qproc = io_async_queue_proc;
+
+ ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, issue_flags);
+ if (ret)
+ return ret > 0 ? IO_APOLL_READY : IO_APOLL_ABORTED;
+ trace_io_uring_poll_arm(req, mask, apoll->poll.events);
+ return IO_APOLL_OK;
+}
+
+int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
+{
+ const struct io_issue_def *def = &io_issue_defs[req->opcode];
+ __poll_t mask = POLLPRI | POLLERR;
+
+ if (!def->pollin && !def->pollout)
return IO_APOLL_ABORTED;
- if ((req->flags & (REQ_F_POLLED|REQ_F_PARTIAL_IO)) == REQ_F_POLLED)
+ if (!io_file_can_poll(req))
return IO_APOLL_ABORTED;
- if (!(req->flags & REQ_F_APOLL_MULTISHOT))
- mask |= EPOLLONESHOT;
if (def->pollin) {
mask |= EPOLLIN | EPOLLRDNORM;
@@ -711,116 +715,72 @@ int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
if (def->poll_exclusive)
mask |= EPOLLEXCLUSIVE;
- apoll = io_req_alloc_apoll(req, issue_flags);
- if (!apoll)
- return IO_APOLL_ABORTED;
- req->flags |= REQ_F_POLLED;
- ipt.pt._qproc = io_async_queue_proc;
-
- io_kbuf_recycle(req, issue_flags);
-
- ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, issue_flags);
- if (ret)
- return ret > 0 ? IO_APOLL_READY : IO_APOLL_ABORTED;
- trace_io_uring_poll_arm(req, mask, apoll->poll.events);
- return IO_APOLL_OK;
+ return io_arm_apoll(req, issue_flags, mask);
}
-static __cold bool io_poll_remove_all_table(struct task_struct *tsk,
- struct io_hash_table *table,
- bool cancel_all)
+/*
+ * Returns true if we found and killed one or more poll requests
+ */
+__cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
+ bool cancel_all)
{
- unsigned nr_buckets = 1U << table->hash_bits;
+ unsigned nr_buckets = 1U << ctx->cancel_table.hash_bits;
struct hlist_node *tmp;
struct io_kiocb *req;
bool found = false;
int i;
+ lockdep_assert_held(&ctx->uring_lock);
+
for (i = 0; i < nr_buckets; i++) {
- struct io_hash_bucket *hb = &table->hbs[i];
+ struct io_hash_bucket *hb = &ctx->cancel_table.hbs[i];
- spin_lock(&hb->lock);
hlist_for_each_entry_safe(req, tmp, &hb->list, hash_node) {
- if (io_match_task_safe(req, tsk, cancel_all)) {
+ if (io_match_task_safe(req, tctx, cancel_all)) {
hlist_del_init(&req->hash_node);
io_poll_cancel_req(req);
found = true;
}
}
- spin_unlock(&hb->lock);
}
return found;
}
-/*
- * Returns true if we found and killed one or more poll requests
- */
-__cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
- bool cancel_all)
- __must_hold(&ctx->uring_lock)
-{
- bool ret;
-
- ret = io_poll_remove_all_table(tsk, &ctx->cancel_table, cancel_all);
- ret |= io_poll_remove_all_table(tsk, &ctx->cancel_table_locked, cancel_all);
- return ret;
-}
-
static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
- struct io_cancel_data *cd,
- struct io_hash_table *table,
- struct io_hash_bucket **out_bucket)
+ struct io_cancel_data *cd)
{
struct io_kiocb *req;
- u32 index = hash_long(cd->data, table->hash_bits);
- struct io_hash_bucket *hb = &table->hbs[index];
+ u32 index = hash_long(cd->data, ctx->cancel_table.hash_bits);
+ struct io_hash_bucket *hb = &ctx->cancel_table.hbs[index];
- *out_bucket = NULL;
-
- spin_lock(&hb->lock);
hlist_for_each_entry(req, &hb->list, hash_node) {
if (cd->data != req->cqe.user_data)
continue;
if (poll_only && req->opcode != IORING_OP_POLL_ADD)
continue;
if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
- if (cd->seq == req->work.cancel_seq)
+ if (io_cancel_match_sequence(req, cd->seq))
continue;
- req->work.cancel_seq = cd->seq;
}
- *out_bucket = hb;
return req;
}
- spin_unlock(&hb->lock);
return NULL;
}
static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
- struct io_cancel_data *cd,
- struct io_hash_table *table,
- struct io_hash_bucket **out_bucket)
+ struct io_cancel_data *cd)
{
- unsigned nr_buckets = 1U << table->hash_bits;
+ unsigned nr_buckets = 1U << ctx->cancel_table.hash_bits;
struct io_kiocb *req;
int i;
- *out_bucket = NULL;
-
for (i = 0; i < nr_buckets; i++) {
- struct io_hash_bucket *hb = &table->hbs[i];
+ struct io_hash_bucket *hb = &ctx->cancel_table.hbs[i];
- spin_lock(&hb->lock);
hlist_for_each_entry(req, &hb->list, hash_node) {
- if (!(cd->flags & IORING_ASYNC_CANCEL_ANY) &&
- req->file != cd->file)
- continue;
- if (cd->seq == req->work.cancel_seq)
- continue;
- req->work.cancel_seq = cd->seq;
- *out_bucket = hb;
- return req;
+ if (io_cancel_req_match(req, cd))
+ return req;
}
- spin_unlock(&hb->lock);
}
return NULL;
}
@@ -836,22 +796,21 @@ static int io_poll_disarm(struct io_kiocb *req)
return 0;
}
-static int __io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
- struct io_hash_table *table)
+static int __io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
{
- struct io_hash_bucket *bucket;
struct io_kiocb *req;
- if (cd->flags & (IORING_ASYNC_CANCEL_FD|IORING_ASYNC_CANCEL_ANY))
- req = io_poll_file_find(ctx, cd, table, &bucket);
+ if (cd->flags & (IORING_ASYNC_CANCEL_FD | IORING_ASYNC_CANCEL_OP |
+ IORING_ASYNC_CANCEL_ANY))
+ req = io_poll_file_find(ctx, cd);
else
- req = io_poll_find(ctx, false, cd, table, &bucket);
+ req = io_poll_find(ctx, false, cd);
- if (req)
+ if (req) {
io_poll_cancel_req(req);
- if (bucket)
- spin_unlock(&bucket->lock);
- return req ? 0 : -ENOENT;
+ return 0;
+ }
+ return -ENOENT;
}
int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
@@ -859,12 +818,8 @@ int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
{
int ret;
- ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table);
- if (ret != -ENOENT)
- return ret;
-
io_ring_submit_lock(ctx, issue_flags);
- ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table_locked);
+ ret = __io_poll_cancel(ctx, cd);
io_ring_submit_unlock(ctx, issue_flags);
return ret;
}
@@ -941,17 +896,10 @@ int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
ipt.pt._qproc = io_poll_queue_proc;
- /*
- * If sqpoll or single issuer, there is no contention for ->uring_lock
- * and we'll end up holding it in tw handlers anyway.
- */
- if (req->ctx->flags & (IORING_SETUP_SQPOLL|IORING_SETUP_SINGLE_ISSUER))
- req->flags |= REQ_F_HASH_LOCKED;
-
ret = __io_arm_poll_handler(req, poll, &ipt, poll->events, issue_flags);
if (ret > 0) {
io_req_set_res(req, ipt.result_mask, 0);
- return IOU_OK;
+ return IOU_COMPLETE;
}
return ret ?: IOU_ISSUE_SKIP_COMPLETE;
}
@@ -959,36 +907,18 @@ int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_poll_update *poll_update = io_kiocb_to_cmd(req, struct io_poll_update);
- struct io_cancel_data cd = { .data = poll_update->old_user_data, };
struct io_ring_ctx *ctx = req->ctx;
- struct io_hash_bucket *bucket;
+ struct io_cancel_data cd = { .ctx = ctx, .data = poll_update->old_user_data, };
struct io_kiocb *preq;
int ret2, ret = 0;
- bool locked;
-
- preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table, &bucket);
- ret2 = io_poll_disarm(preq);
- if (bucket)
- spin_unlock(&bucket->lock);
- if (!ret2)
- goto found;
- if (ret2 != -ENOENT) {
- ret = ret2;
- goto out;
- }
io_ring_submit_lock(ctx, issue_flags);
- preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table_locked, &bucket);
+ preq = io_poll_find(ctx, true, &cd);
ret2 = io_poll_disarm(preq);
- if (bucket)
- spin_unlock(&bucket->lock);
- io_ring_submit_unlock(ctx, issue_flags);
if (ret2) {
ret = ret2;
goto out;
}
-
-found:
if (WARN_ON_ONCE(preq->opcode != IORING_OP_POLL_ADD)) {
ret = -EFAULT;
goto out;
@@ -1006,27 +936,28 @@ found:
if (poll_update->update_user_data)
preq->cqe.user_data = poll_update->new_user_data;
- ret2 = io_poll_add(preq, issue_flags);
+ ret2 = io_poll_add(preq, issue_flags & ~IO_URING_F_UNLOCKED);
/* successfully updated, don't complete poll request */
- if (!ret2 || ret2 == -EIOCBQUEUED)
+ if (ret2 == IOU_ISSUE_SKIP_COMPLETE)
goto out;
+ /* request completed as part of the update, complete it */
+ else if (ret2 == IOU_COMPLETE)
+ goto complete;
}
- req_set_fail(preq);
io_req_set_res(preq, -ECANCELED, 0);
- locked = !(issue_flags & IO_URING_F_UNLOCKED);
- io_req_task_complete(preq, &locked);
+complete:
+ if (preq->cqe.res < 0)
+ req_set_fail(preq);
+ preq->io_task_work.func = io_req_task_complete;
+ io_req_task_work_add(preq);
out:
+ io_ring_submit_unlock(ctx, issue_flags);
if (ret < 0) {
req_set_fail(req);
return ret;
}
/* complete update request, we're done with it */
io_req_set_res(req, ret, 0);
- return IOU_OK;
-}
-
-void io_apoll_cache_free(struct io_cache_entry *entry)
-{
- kfree(container_of(entry, struct async_poll, cache));
+ return IOU_COMPLETE;
}
diff --git a/io_uring/poll.h b/io_uring/poll.h
index 5f3bae50fc81..5647c5138932 100644
--- a/io_uring/poll.h
+++ b/io_uring/poll.h
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
-#include "alloc_cache.h"
+#include <linux/io_uring_types.h>
+
+#define IO_POLL_ALLOC_CACHE_MAX 32
enum {
IO_APOLL_OK,
@@ -12,17 +14,24 @@ struct io_poll {
struct file *file;
struct wait_queue_head *head;
__poll_t events;
+ int retries;
struct wait_queue_entry wait;
};
struct async_poll {
- union {
- struct io_poll poll;
- struct io_cache_entry cache;
- };
+ struct io_poll poll;
struct io_poll *double_poll;
};
+/*
+ * Must only be called inside issue_flags & IO_URING_F_MULTISHOT, or
+ * potentially other cases where we already "own" this poll request.
+ */
+static inline void io_poll_multishot_retry(struct io_kiocb *req)
+{
+ atomic_inc(&req->poll_refs);
+}
+
int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_poll_add(struct io_kiocb *req, unsigned int issue_flags);
@@ -32,8 +41,9 @@ int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags);
struct io_cancel_data;
int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
unsigned issue_flags);
+int io_arm_apoll(struct io_kiocb *req, unsigned issue_flags, __poll_t mask);
int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags);
-bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
+bool io_poll_remove_all(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
bool cancel_all);
-void io_apoll_cache_free(struct io_cache_entry *entry);
+void io_poll_task_func(struct io_tw_req tw_req, io_tw_token_t tw);
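
io_poll_multishot_retry() exists for multishot handlers that have just posted a CQE while still owning the poll reference and want the request re-run immediately instead of waiting for the next poll wakeup. A simplified sketch of the intended call pattern (illustrative only, not the actual rw.c/net.c code):

static int mshot_post_sketch(struct io_kiocb *req, unsigned int issue_flags,
			     int res)
{
	if (io_req_post_cqe(req, res, IORING_CQE_F_MORE)) {
		if (issue_flags & IO_URING_F_MULTISHOT) {
			/* we own a poll ref here; bump it so the req retries */
			io_poll_multishot_retry(req);
			return IOU_ISSUE_SKIP_COMPLETE;
		}
		return -EAGAIN;
	}
	/* CQ is backed up or the ring is dying: end the multishot here */
	io_req_set_res(req, res, 0);
	return IOU_COMPLETE;
}
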
diff --git a/io_uring/query.c b/io_uring/query.c
new file mode 100644
index 000000000000..abdd6f3e1223
--- /dev/null
+++ b/io_uring/query.c
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "linux/io_uring/query.h"
+
+#include "query.h"
+#include "io_uring.h"
+#include "zcrx.h"
+
+union io_query_data {
+ struct io_uring_query_opcode opcodes;
+ struct io_uring_query_zcrx zcrx;
+ struct io_uring_query_scq scq;
+};
+
+#define IO_MAX_QUERY_SIZE sizeof(union io_query_data)
+#define IO_MAX_QUERY_ENTRIES 1000
+
+static ssize_t io_query_ops(union io_query_data *data)
+{
+ struct io_uring_query_opcode *e = &data->opcodes;
+
+ e->nr_request_opcodes = IORING_OP_LAST;
+ e->nr_register_opcodes = IORING_REGISTER_LAST;
+ e->feature_flags = IORING_FEAT_FLAGS;
+ e->ring_setup_flags = IORING_SETUP_FLAGS;
+ e->enter_flags = IORING_ENTER_FLAGS;
+ e->sqe_flags = SQE_VALID_FLAGS;
+ e->nr_query_opcodes = __IO_URING_QUERY_MAX;
+ e->__pad = 0;
+ return sizeof(*e);
+}
+
+static ssize_t io_query_zcrx(union io_query_data *data)
+{
+ struct io_uring_query_zcrx *e = &data->zcrx;
+
+ e->register_flags = ZCRX_REG_IMPORT;
+ e->area_flags = IORING_ZCRX_AREA_DMABUF;
+ e->nr_ctrl_opcodes = __ZCRX_CTRL_LAST;
+ e->rq_hdr_size = sizeof(struct io_uring);
+ e->rq_hdr_alignment = L1_CACHE_BYTES;
+ e->__resv1 = 0;
+ e->__resv2 = 0;
+ return sizeof(*e);
+}
+
+static ssize_t io_query_scq(union io_query_data *data)
+{
+ struct io_uring_query_scq *e = &data->scq;
+
+ e->hdr_size = sizeof(struct io_rings);
+ e->hdr_alignment = SMP_CACHE_BYTES;
+ return sizeof(*e);
+}
+
+static int io_handle_query_entry(union io_query_data *data, void __user *uhdr,
+ u64 *next_entry)
+{
+ struct io_uring_query_hdr hdr;
+ size_t usize, res_size = 0;
+ ssize_t ret = -EINVAL;
+ void __user *udata;
+
+ if (copy_from_user(&hdr, uhdr, sizeof(hdr)))
+ return -EFAULT;
+ usize = hdr.size;
+ hdr.size = min(hdr.size, IO_MAX_QUERY_SIZE);
+ udata = u64_to_user_ptr(hdr.query_data);
+
+ if (hdr.query_op >= __IO_URING_QUERY_MAX) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+ if (!mem_is_zero(hdr.__resv, sizeof(hdr.__resv)) || hdr.result || !hdr.size)
+ goto out;
+ if (copy_from_user(data, udata, hdr.size))
+ return -EFAULT;
+
+ switch (hdr.query_op) {
+ case IO_URING_QUERY_OPCODES:
+ ret = io_query_ops(data);
+ break;
+ case IO_URING_QUERY_ZCRX:
+ ret = io_query_zcrx(data);
+ break;
+ case IO_URING_QUERY_SCQ:
+ ret = io_query_scq(data);
+ break;
+ }
+
+ if (ret >= 0) {
+ if (WARN_ON_ONCE(ret > IO_MAX_QUERY_SIZE))
+ return -EFAULT;
+ res_size = ret;
+ ret = 0;
+ }
+out:
+ hdr.result = ret;
+ hdr.size = min_t(size_t, usize, res_size);
+
+ if (copy_struct_to_user(udata, usize, data, hdr.size, NULL))
+ return -EFAULT;
+ if (copy_to_user(uhdr, &hdr, sizeof(hdr)))
+ return -EFAULT;
+ *next_entry = hdr.next_entry;
+ return 0;
+}
+
+int io_query(void __user *arg, unsigned nr_args)
+{
+ union io_query_data entry_buffer;
+ void __user *uhdr = arg;
+ int ret, nr = 0;
+
+ memset(&entry_buffer, 0, sizeof(entry_buffer));
+
+ if (nr_args)
+ return -EINVAL;
+
+ while (uhdr) {
+ u64 next_hdr;
+
+ ret = io_handle_query_entry(&entry_buffer, uhdr, &next_hdr);
+ if (ret)
+ return ret;
+ uhdr = u64_to_user_ptr(next_hdr);
+
+ /* Have some limit to avoid a potential cycle */
+ if (++nr >= IO_MAX_QUERY_ENTRIES)
+ return -ERANGE;
+ if (fatal_signal_pending(current))
+ return -EINTR;
+ cond_resched();
+ }
+ return 0;
+}
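
Each io_uring_query_hdr is processed independently and chained through next_entry, with the per-entry status written back into hdr.result. A hedged userspace sketch of a single-entry chain asking for the opcode limits; the register opcode name (IORING_REGISTER_QUERY here) and the uapi include path are assumptions based on this patch, not verified names:

#include <string.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring/query.h>

static int query_opcodes(int ring_fd, struct io_uring_query_opcode *out)
{
	struct io_uring_query_hdr hdr;

	memset(&hdr, 0, sizeof(hdr));
	hdr.query_op = IO_URING_QUERY_OPCODES;
	hdr.query_data = (uintptr_t)out;
	hdr.size = sizeof(*out);
	hdr.next_entry = 0;			/* single-entry chain */

	/* nr_args must be 0, per io_query() above */
	if (syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_QUERY,
		    &hdr, 0) < 0)
		return -1;
	return hdr.result;			/* 0 on success, -errno per entry */
}
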
diff --git a/io_uring/query.h b/io_uring/query.h
new file mode 100644
index 000000000000..b35eb52f0ea8
--- /dev/null
+++ b/io_uring/query.h
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef IORING_QUERY_H
+#define IORING_QUERY_H
+
+#include <linux/io_uring_types.h>
+
+int io_query(void __user *arg, unsigned nr_args);
+
+#endif
diff --git a/io_uring/refs.h b/io_uring/refs.h
index 1336de3f2a30..0d928d87c4ed 100644
--- a/io_uring/refs.h
+++ b/io_uring/refs.h
@@ -17,6 +17,13 @@ static inline bool req_ref_inc_not_zero(struct io_kiocb *req)
return atomic_inc_not_zero(&req->refs);
}
+static inline bool req_ref_put_and_test_atomic(struct io_kiocb *req)
+{
+ WARN_ON_ONCE(!(data_race(req->flags) & REQ_F_REFCOUNT));
+ WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
+ return atomic_dec_and_test(&req->refs);
+}
+
static inline bool req_ref_put_and_test(struct io_kiocb *req)
{
if (likely(!(req->flags & REQ_F_REFCOUNT)))
@@ -33,6 +40,13 @@ static inline void req_ref_get(struct io_kiocb *req)
atomic_inc(&req->refs);
}
+static inline void req_ref_put(struct io_kiocb *req)
+{
+ WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
+ WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
+ atomic_dec(&req->refs);
+}
+
static inline void __io_req_set_refcount(struct io_kiocb *req, int nr)
{
if (!(req->flags & REQ_F_REFCOUNT)) {
diff --git a/io_uring/register.c b/io_uring/register.c
new file mode 100644
index 000000000000..62d39b3ff317
--- /dev/null
+++ b/io_uring/register.c
@@ -0,0 +1,927 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Code related to the io_uring_register() syscall
+ *
+ * Copyright (C) 2023 Jens Axboe
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/syscalls.h>
+#include <linux/refcount.h>
+#include <linux/bits.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/nospec.h>
+#include <linux/compat.h>
+#include <linux/io_uring.h>
+#include <linux/io_uring_types.h>
+
+#include "filetable.h"
+#include "io_uring.h"
+#include "opdef.h"
+#include "tctx.h"
+#include "rsrc.h"
+#include "sqpoll.h"
+#include "register.h"
+#include "cancel.h"
+#include "kbuf.h"
+#include "napi.h"
+#include "eventfd.h"
+#include "msg_ring.h"
+#include "memmap.h"
+#include "zcrx.h"
+#include "query.h"
+
+#define IORING_MAX_RESTRICTIONS (IORING_RESTRICTION_LAST + \
+ IORING_REGISTER_LAST + IORING_OP_LAST)
+
+static __cold int io_probe(struct io_ring_ctx *ctx, void __user *arg,
+ unsigned nr_args)
+{
+ struct io_uring_probe *p;
+ size_t size;
+ int i, ret;
+
+ if (nr_args > IORING_OP_LAST)
+ nr_args = IORING_OP_LAST;
+
+ size = struct_size(p, ops, nr_args);
+ p = memdup_user(arg, size);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
+ ret = -EINVAL;
+ if (memchr_inv(p, 0, size))
+ goto out;
+
+ p->last_op = IORING_OP_LAST - 1;
+
+ for (i = 0; i < nr_args; i++) {
+ p->ops[i].op = i;
+ if (io_uring_op_supported(i))
+ p->ops[i].flags = IO_URING_OP_SUPPORTED;
+ }
+ p->ops_len = i;
+
+ ret = 0;
+ if (copy_to_user(arg, p, size))
+ ret = -EFAULT;
+out:
+ kfree(p);
+ return ret;
+}
+
+int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
+{
+ const struct cred *creds;
+
+ creds = xa_erase(&ctx->personalities, id);
+ if (creds) {
+ put_cred(creds);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int io_register_personality(struct io_ring_ctx *ctx)
+{
+ const struct cred *creds;
+ u32 id;
+ int ret;
+
+ creds = get_current_cred();
+
+ ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)creds,
+ XA_LIMIT(0, USHRT_MAX), &ctx->pers_next, GFP_KERNEL);
+ if (ret < 0) {
+ put_cred(creds);
+ return ret;
+ }
+ return id;
+}
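Usage note (not part of the patch): registering a personality snapshots the caller's credentials and returns an id that individual requests can reference via sqe->personality; unregistering passes that id back through nr_args, exactly as the opcode dispatch later in this file expects. A small userspace sketch using the raw syscall, with error handling elided:

#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

static int personality_roundtrip(int ring_fd)
{
	int id;

	/* arg must be NULL and nr_args 0; the returned value is the id */
	id = syscall(__NR_io_uring_register, ring_fd,
		     IORING_REGISTER_PERSONALITY, NULL, 0);
	if (id < 0)
		return id;

	/* ... submit SQEs with sqe->personality = id ... */

	/* the id to drop is carried in nr_args, arg stays NULL */
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_UNREGISTER_PERSONALITY, NULL, id);
}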
+
+static __cold int io_parse_restrictions(void __user *arg, unsigned int nr_args,
+ struct io_restriction *restrictions)
+{
+ struct io_uring_restriction *res;
+ size_t size;
+ int i, ret;
+
+ if (!arg || nr_args > IORING_MAX_RESTRICTIONS)
+ return -EINVAL;
+
+ size = array_size(nr_args, sizeof(*res));
+ if (size == SIZE_MAX)
+ return -EOVERFLOW;
+
+ res = memdup_user(arg, size);
+ if (IS_ERR(res))
+ return PTR_ERR(res);
+
+ ret = -EINVAL;
+
+ for (i = 0; i < nr_args; i++) {
+ switch (res[i].opcode) {
+ case IORING_RESTRICTION_REGISTER_OP:
+ if (res[i].register_op >= IORING_REGISTER_LAST)
+ goto err;
+ __set_bit(res[i].register_op, restrictions->register_op);
+ break;
+ case IORING_RESTRICTION_SQE_OP:
+ if (res[i].sqe_op >= IORING_OP_LAST)
+ goto err;
+ __set_bit(res[i].sqe_op, restrictions->sqe_op);
+ break;
+ case IORING_RESTRICTION_SQE_FLAGS_ALLOWED:
+ restrictions->sqe_flags_allowed = res[i].sqe_flags;
+ break;
+ case IORING_RESTRICTION_SQE_FLAGS_REQUIRED:
+ restrictions->sqe_flags_required = res[i].sqe_flags;
+ break;
+ default:
+ goto err;
+ }
+ }
+
+ ret = 0;
+
+err:
+ kfree(res);
+ return ret;
+}
+
+static __cold int io_register_restrictions(struct io_ring_ctx *ctx,
+ void __user *arg, unsigned int nr_args)
+{
+ int ret;
+
+ /* Restrictions allowed only if rings started disabled */
+ if (!(ctx->flags & IORING_SETUP_R_DISABLED))
+ return -EBADFD;
+
+ /* We allow only a single restrictions registration */
+ if (ctx->restrictions.registered)
+ return -EBUSY;
+
+ ret = io_parse_restrictions(arg, nr_args, &ctx->restrictions);
+ /* Reset all restrictions if an error happened */
+ if (ret != 0)
+ memset(&ctx->restrictions, 0, sizeof(ctx->restrictions));
+ else
+ ctx->restrictions.registered = true;
+ return ret;
+}
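Usage note (not part of the patch): restrictions can only be installed once, and only while the ring is still IORING_SETUP_R_DISABLED; enabling the ring afterwards makes them take effect. A hedged userspace sketch that limits a ring to reads, writes and buffer registration, using only the restriction opcodes handled in io_parse_restrictions() above:

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

static int restrict_and_enable(int ring_fd)
{
	struct io_uring_restriction res[3];
	int ret;

	memset(res, 0, sizeof(res));
	res[0].opcode = IORING_RESTRICTION_SQE_OP;
	res[0].sqe_op = IORING_OP_READ;
	res[1].opcode = IORING_RESTRICTION_SQE_OP;
	res[1].sqe_op = IORING_OP_WRITE;
	res[2].opcode = IORING_RESTRICTION_REGISTER_OP;
	res[2].register_op = IORING_REGISTER_BUFFERS;

	/* allowed only once, and only while the ring is still disabled */
	ret = syscall(__NR_io_uring_register, ring_fd,
		      IORING_REGISTER_RESTRICTIONS, res, 3);
	if (ret)
		return ret;

	/* flip the ring live; arg must be NULL and nr_args 0 */
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_ENABLE_RINGS, NULL, 0);
}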
+
+static int io_register_enable_rings(struct io_ring_ctx *ctx)
+{
+ if (!(ctx->flags & IORING_SETUP_R_DISABLED))
+ return -EBADFD;
+
+ if (ctx->flags & IORING_SETUP_SINGLE_ISSUER && !ctx->submitter_task) {
+ WRITE_ONCE(ctx->submitter_task, get_task_struct(current));
+ /*
+ * Lazy activation attempts would fail if the ring was polled
+ * before submitter_task was set.
+ */
+ if (wq_has_sleeper(&ctx->poll_wq))
+ io_activate_pollwq(ctx);
+ }
+
+ if (ctx->restrictions.registered)
+ ctx->restricted = 1;
+
+ ctx->flags &= ~IORING_SETUP_R_DISABLED;
+ if (ctx->sq_data && wq_has_sleeper(&ctx->sq_data->wait))
+ wake_up(&ctx->sq_data->wait);
+ return 0;
+}
+
+static __cold int __io_register_iowq_aff(struct io_ring_ctx *ctx,
+ cpumask_var_t new_mask)
+{
+ int ret;
+
+ if (!(ctx->flags & IORING_SETUP_SQPOLL)) {
+ ret = io_wq_cpu_affinity(current->io_uring, new_mask);
+ } else {
+ mutex_unlock(&ctx->uring_lock);
+ ret = io_sqpoll_wq_cpu_affinity(ctx, new_mask);
+ mutex_lock(&ctx->uring_lock);
+ }
+
+ return ret;
+}
+
+static __cold int io_register_iowq_aff(struct io_ring_ctx *ctx,
+ void __user *arg, unsigned len)
+{
+ cpumask_var_t new_mask;
+ int ret;
+
+ if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
+ return -ENOMEM;
+
+ cpumask_clear(new_mask);
+ if (len > cpumask_size())
+ len = cpumask_size();
+
+#ifdef CONFIG_COMPAT
+ if (in_compat_syscall())
+ ret = compat_get_bitmap(cpumask_bits(new_mask),
+ (const compat_ulong_t __user *)arg,
+ len * 8 /* CHAR_BIT */);
+ else
+#endif
+ ret = copy_from_user(new_mask, arg, len);
+
+ if (ret) {
+ free_cpumask_var(new_mask);
+ return -EFAULT;
+ }
+
+ ret = __io_register_iowq_aff(ctx, new_mask);
+ free_cpumask_var(new_mask);
+ return ret;
+}
+
+static __cold int io_unregister_iowq_aff(struct io_ring_ctx *ctx)
+{
+ return __io_register_iowq_aff(ctx, NULL);
+}
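Usage note (not part of the patch): for IOWQ_AFF the nr_args argument carries the byte length of the mask, which the kernel clamps to its own cpumask size as shown above; passing a NULL mask via the unregister opcode clears the affinity. A hedged userspace sketch pinning io-wq workers to CPUs 0 and 1:

#define _GNU_SOURCE
#include <sched.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

static int pin_iowq(int ring_fd)
{
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(0, &mask);
	CPU_SET(1, &mask);

	/* nr_args is the byte length of the mask */
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_IOWQ_AFF, &mask, sizeof(mask));
}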
+
+static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
+ void __user *arg)
+ __must_hold(&ctx->uring_lock)
+{
+ struct io_tctx_node *node;
+ struct io_uring_task *tctx = NULL;
+ struct io_sq_data *sqd = NULL;
+ __u32 new_count[2];
+ int i, ret;
+
+ if (copy_from_user(new_count, arg, sizeof(new_count)))
+ return -EFAULT;
+ for (i = 0; i < ARRAY_SIZE(new_count); i++)
+ if (new_count[i] > INT_MAX)
+ return -EINVAL;
+
+ if (ctx->flags & IORING_SETUP_SQPOLL) {
+ sqd = ctx->sq_data;
+ if (sqd) {
+ struct task_struct *tsk;
+
+ /*
+ * Observe the correct sqd->lock -> ctx->uring_lock
+ * ordering. Fine to drop uring_lock here, we hold
+ * a ref to the ctx.
+ */
+ refcount_inc(&sqd->refs);
+ mutex_unlock(&ctx->uring_lock);
+ mutex_lock(&sqd->lock);
+ mutex_lock(&ctx->uring_lock);
+ tsk = sqpoll_task_locked(sqd);
+ if (tsk)
+ tctx = tsk->io_uring;
+ }
+ } else {
+ tctx = current->io_uring;
+ }
+
+ BUILD_BUG_ON(sizeof(new_count) != sizeof(ctx->iowq_limits));
+
+ for (i = 0; i < ARRAY_SIZE(new_count); i++)
+ if (new_count[i])
+ ctx->iowq_limits[i] = new_count[i];
+ ctx->iowq_limits_set = true;
+
+ if (tctx && tctx->io_wq) {
+ ret = io_wq_max_workers(tctx->io_wq, new_count);
+ if (ret)
+ goto err;
+ } else {
+ memset(new_count, 0, sizeof(new_count));
+ }
+
+ if (sqd) {
+ mutex_unlock(&ctx->uring_lock);
+ mutex_unlock(&sqd->lock);
+ io_put_sq_data(sqd);
+ mutex_lock(&ctx->uring_lock);
+ }
+
+ if (copy_to_user(arg, new_count, sizeof(new_count)))
+ return -EFAULT;
+
+ /* that's it for SQPOLL, only the SQPOLL task creates requests */
+ if (sqd)
+ return 0;
+
+ /* now propagate the restriction to all registered users */
+ list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
+ tctx = node->task->io_uring;
+ if (WARN_ON_ONCE(!tctx->io_wq))
+ continue;
+
+ for (i = 0; i < ARRAY_SIZE(new_count); i++)
+ new_count[i] = ctx->iowq_limits[i];
+ /* ignore errors, it always returns zero anyway */
+ (void)io_wq_max_workers(tctx->io_wq, new_count);
+ }
+ return 0;
+err:
+ if (sqd) {
+ mutex_unlock(&ctx->uring_lock);
+ mutex_unlock(&sqd->lock);
+ io_put_sq_data(sqd);
+ mutex_lock(&ctx->uring_lock);
+ }
+ return ret;
+}
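Usage note (not part of the patch): the argument is a two-element array of limits, a zero entry leaves that limit untouched, and the kernel copies the previously effective limits back into the array, as the copy_to_user() above shows. A short userspace sketch:

#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

static int cap_workers(int ring_fd)
{
	unsigned int counts[2] = { 8, 4 };	/* [bounded, unbounded] */
	int ret;

	ret = syscall(__NR_io_uring_register, ring_fd,
		      IORING_REGISTER_IOWQ_MAX_WORKERS, counts, 2);
	/* on success counts[] now holds the limits that were in effect before */
	return ret;
}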
+
+static int io_register_clock(struct io_ring_ctx *ctx,
+ struct io_uring_clock_register __user *arg)
+{
+ struct io_uring_clock_register reg;
+
+ if (copy_from_user(&reg, arg, sizeof(reg)))
+ return -EFAULT;
+ if (memchr_inv(&reg.__resv, 0, sizeof(reg.__resv)))
+ return -EINVAL;
+
+ switch (reg.clockid) {
+ case CLOCK_MONOTONIC:
+ ctx->clock_offset = 0;
+ break;
+ case CLOCK_BOOTTIME:
+ ctx->clock_offset = TK_OFFS_BOOT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ctx->clockid = reg.clockid;
+ return 0;
+}
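Usage note (not part of the patch): this switches the clock used for CQ wait timeouts on the ring between CLOCK_MONOTONIC (the default) and CLOCK_BOOTTIME; reserved fields must be zero to pass the memchr_inv() check above. A minimal userspace sketch:

#include <string.h>
#include <time.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

static int use_boottime(int ring_fd)
{
	struct io_uring_clock_register reg;

	memset(&reg, 0, sizeof(reg));	/* reserved fields must be zero */
	reg.clockid = CLOCK_BOOTTIME;

	/* arg is the struct, nr_args must be 0 for this opcode */
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_CLOCK, &reg, 0);
}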
+
+/*
+ * State to maintain until we can swap. Both new and old state, used for
+ * either mapping or freeing.
+ */
+struct io_ring_ctx_rings {
+ struct io_rings *rings;
+ struct io_uring_sqe *sq_sqes;
+
+ struct io_mapped_region sq_region;
+ struct io_mapped_region ring_region;
+};
+
+static void io_register_free_rings(struct io_ring_ctx *ctx,
+ struct io_ring_ctx_rings *r)
+{
+ io_free_region(ctx->user, &r->sq_region);
+ io_free_region(ctx->user, &r->ring_region);
+}
+
+#define swap_old(ctx, o, n, field) \
+ do { \
+ (o).field = (ctx)->field; \
+ (ctx)->field = (n).field; \
+ } while (0)
+
+#define RESIZE_FLAGS (IORING_SETUP_CQSIZE | IORING_SETUP_CLAMP)
+#define COPY_FLAGS (IORING_SETUP_NO_SQARRAY | IORING_SETUP_SQE128 | \
+ IORING_SETUP_CQE32 | IORING_SETUP_NO_MMAP | \
+ IORING_SETUP_CQE_MIXED | IORING_SETUP_SQE_MIXED)
+
+static int io_register_resize_rings(struct io_ring_ctx *ctx, void __user *arg)
+{
+ struct io_ctx_config config;
+ struct io_uring_region_desc rd;
+ struct io_ring_ctx_rings o = { }, n = { }, *to_free = NULL;
+ unsigned i, tail, old_head;
+ struct io_uring_params *p = &config.p;
+ struct io_rings_layout *rl = &config.layout;
+ int ret;
+
+ memset(&config, 0, sizeof(config));
+
+ /* limited to DEFER_TASKRUN for now */
+ if (!(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
+ return -EINVAL;
+ if (copy_from_user(p, arg, sizeof(*p)))
+ return -EFAULT;
+ if (p->flags & ~RESIZE_FLAGS)
+ return -EINVAL;
+
+ /* properties that are always inherited */
+ p->flags |= (ctx->flags & COPY_FLAGS);
+
+ ret = io_prepare_config(&config);
+ if (unlikely(ret))
+ return ret;
+
+ memset(&rd, 0, sizeof(rd));
+ rd.size = PAGE_ALIGN(rl->rings_size);
+ if (p->flags & IORING_SETUP_NO_MMAP) {
+ rd.user_addr = p->cq_off.user_addr;
+ rd.flags |= IORING_MEM_REGION_TYPE_USER;
+ }
+ ret = io_create_region(ctx, &n.ring_region, &rd, IORING_OFF_CQ_RING);
+ if (ret)
+ return ret;
+
+ n.rings = io_region_get_ptr(&n.ring_region);
+
+ /*
+ * At this point n.rings is shared with userspace, just like o.rings
+ * is as well. While we don't expect userspace to modify it while
+ * a resize is in progress, and it's most likely that userspace will
+ * shoot itself in the foot if it does, we can't always assume good
+ * intent... Use read/write once helpers from here on to indicate the
+ * shared nature of it.
+ */
+ WRITE_ONCE(n.rings->sq_ring_mask, p->sq_entries - 1);
+ WRITE_ONCE(n.rings->cq_ring_mask, p->cq_entries - 1);
+ WRITE_ONCE(n.rings->sq_ring_entries, p->sq_entries);
+ WRITE_ONCE(n.rings->cq_ring_entries, p->cq_entries);
+
+ if (copy_to_user(arg, p, sizeof(*p))) {
+ io_register_free_rings(ctx, &n);
+ return -EFAULT;
+ }
+
+ memset(&rd, 0, sizeof(rd));
+ rd.size = PAGE_ALIGN(rl->sq_size);
+ if (p->flags & IORING_SETUP_NO_MMAP) {
+ rd.user_addr = p->sq_off.user_addr;
+ rd.flags |= IORING_MEM_REGION_TYPE_USER;
+ }
+ ret = io_create_region(ctx, &n.sq_region, &rd, IORING_OFF_SQES);
+ if (ret) {
+ io_register_free_rings(ctx, &n);
+ return ret;
+ }
+ n.sq_sqes = io_region_get_ptr(&n.sq_region);
+
+ /*
+ * If using SQPOLL, park the thread
+ */
+ if (ctx->sq_data) {
+ mutex_unlock(&ctx->uring_lock);
+ io_sq_thread_park(ctx->sq_data);
+ mutex_lock(&ctx->uring_lock);
+ }
+
+ /*
+ * We'll do the swap. Grab the ctx->mmap_lock, which will exclude
+ * any new mmap's on the ring fd. Clear out existing mappings to prevent
+ * mmap from seeing them, as we'll unmap them. Any attempt to mmap
+ * existing rings beyond this point will fail. Not that it could proceed
+ * at this point anyway, as the io_uring mmap side needs to grab the
+ * ctx->mmap_lock as well. Likewise, hold the completion lock over the
+ * duration of the actual swap.
+ */
+ mutex_lock(&ctx->mmap_lock);
+ spin_lock(&ctx->completion_lock);
+ o.rings = ctx->rings;
+ ctx->rings = NULL;
+ o.sq_sqes = ctx->sq_sqes;
+ ctx->sq_sqes = NULL;
+
+ /*
+ * Now copy SQ and CQ entries, if any. If either of the destination
+ * rings can't hold what is already there, then fail the operation.
+ */
+ tail = READ_ONCE(o.rings->sq.tail);
+ old_head = READ_ONCE(o.rings->sq.head);
+ if (tail - old_head > p->sq_entries)
+ goto overflow;
+ for (i = old_head; i < tail; i++) {
+ unsigned src_head = i & (ctx->sq_entries - 1);
+ unsigned dst_head = i & (p->sq_entries - 1);
+
+ n.sq_sqes[dst_head] = o.sq_sqes[src_head];
+ }
+ WRITE_ONCE(n.rings->sq.head, old_head);
+ WRITE_ONCE(n.rings->sq.tail, tail);
+
+ tail = READ_ONCE(o.rings->cq.tail);
+ old_head = READ_ONCE(o.rings->cq.head);
+ if (tail - old_head > p->cq_entries) {
+overflow:
+ /* restore old rings, and return -EOVERFLOW via cleanup path */
+ ctx->rings = o.rings;
+ ctx->sq_sqes = o.sq_sqes;
+ to_free = &n;
+ ret = -EOVERFLOW;
+ goto out;
+ }
+ for (i = old_head; i < tail; i++) {
+ unsigned src_head = i & (ctx->cq_entries - 1);
+ unsigned dst_head = i & (p->cq_entries - 1);
+
+ n.rings->cqes[dst_head] = o.rings->cqes[src_head];
+ }
+ WRITE_ONCE(n.rings->cq.head, old_head);
+ WRITE_ONCE(n.rings->cq.tail, tail);
+ /* invalidate cached cqe refill */
+ ctx->cqe_cached = ctx->cqe_sentinel = NULL;
+
+ WRITE_ONCE(n.rings->sq_dropped, READ_ONCE(o.rings->sq_dropped));
+ atomic_set(&n.rings->sq_flags, atomic_read(&o.rings->sq_flags));
+ WRITE_ONCE(n.rings->cq_flags, READ_ONCE(o.rings->cq_flags));
+ WRITE_ONCE(n.rings->cq_overflow, READ_ONCE(o.rings->cq_overflow));
+
+ /* all done, store old pointers and assign new ones */
+ if (!(ctx->flags & IORING_SETUP_NO_SQARRAY))
+ ctx->sq_array = (u32 *)((char *)n.rings + rl->sq_array_offset);
+
+ ctx->sq_entries = p->sq_entries;
+ ctx->cq_entries = p->cq_entries;
+
+ ctx->rings = n.rings;
+ ctx->sq_sqes = n.sq_sqes;
+ swap_old(ctx, o, n, ring_region);
+ swap_old(ctx, o, n, sq_region);
+ to_free = &o;
+ ret = 0;
+out:
+ spin_unlock(&ctx->completion_lock);
+ mutex_unlock(&ctx->mmap_lock);
+ io_register_free_rings(ctx, to_free);
+
+ if (ctx->sq_data)
+ io_sq_thread_unpark(ctx->sq_data);
+
+ return ret;
+}
+
+static int io_register_mem_region(struct io_ring_ctx *ctx, void __user *uarg)
+{
+ struct io_uring_mem_region_reg __user *reg_uptr = uarg;
+ struct io_uring_mem_region_reg reg;
+ struct io_uring_region_desc __user *rd_uptr;
+ struct io_uring_region_desc rd;
+ struct io_mapped_region region = {};
+ int ret;
+
+ if (io_region_is_set(&ctx->param_region))
+ return -EBUSY;
+ if (copy_from_user(&reg, reg_uptr, sizeof(reg)))
+ return -EFAULT;
+ rd_uptr = u64_to_user_ptr(reg.region_uptr);
+ if (copy_from_user(&rd, rd_uptr, sizeof(rd)))
+ return -EFAULT;
+ if (memchr_inv(&reg.__resv, 0, sizeof(reg.__resv)))
+ return -EINVAL;
+ if (reg.flags & ~IORING_MEM_REGION_REG_WAIT_ARG)
+ return -EINVAL;
+
+ /*
+ * This ensures there are no waiters. Waiters are unlocked and it's
+ * hard to synchronise with them, especially if we need to initialise
+ * the region.
+ */
+ if ((reg.flags & IORING_MEM_REGION_REG_WAIT_ARG) &&
+ !(ctx->flags & IORING_SETUP_R_DISABLED))
+ return -EINVAL;
+
+ ret = io_create_region(ctx, &region, &rd, IORING_MAP_OFF_PARAM_REGION);
+ if (ret)
+ return ret;
+ if (copy_to_user(rd_uptr, &rd, sizeof(rd))) {
+ io_free_region(ctx->user, &region);
+ return -EFAULT;
+ }
+
+ if (reg.flags & IORING_MEM_REGION_REG_WAIT_ARG) {
+ ctx->cq_wait_arg = io_region_get_ptr(&region);
+ ctx->cq_wait_size = rd.size;
+ }
+
+ io_region_publish(ctx, &region, &ctx->param_region);
+ return 0;
+}
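Usage note (not part of the patch): userspace hands the kernel a region descriptor through region_uptr and may flag it as usable for CQ wait arguments; when that flag is set the ring must still be IORING_SETUP_R_DISABLED, per the check above. The sketch below only relies on the fields referenced in this file (user_addr, size, flags, region_uptr); treat the exact struct layouts as something to verify against the uapi headers.

#include <string.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

static int register_wait_region(int ring_fd)
{
	struct io_uring_region_desc rd;
	struct io_uring_mem_region_reg reg;
	void *mem;

	if (posix_memalign(&mem, 4096, 4096))
		return -1;
	memset(mem, 0, 4096);

	memset(&rd, 0, sizeof(rd));
	rd.user_addr = (uint64_t)(uintptr_t)mem;	/* user-provided backing memory */
	rd.size = 4096;
	rd.flags = IORING_MEM_REGION_TYPE_USER;

	memset(&reg, 0, sizeof(reg));
	reg.region_uptr = (uint64_t)(uintptr_t)&rd;
	reg.flags = IORING_MEM_REGION_REG_WAIT_ARG;	/* needs a still-disabled ring */

	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_MEM_REGION, &reg, 1);
}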
+
+static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
+ void __user *arg, unsigned nr_args)
+ __releases(ctx->uring_lock)
+ __acquires(ctx->uring_lock)
+{
+ int ret;
+
+ /*
+ * We don't quiesce the refs for register anymore and so it can't be
+ * dying as we're holding a file ref here.
+ */
+ if (WARN_ON_ONCE(percpu_ref_is_dying(&ctx->refs)))
+ return -ENXIO;
+
+ if (ctx->submitter_task && ctx->submitter_task != current)
+ return -EEXIST;
+
+ if (ctx->restricted) {
+ opcode = array_index_nospec(opcode, IORING_REGISTER_LAST);
+ if (!test_bit(opcode, ctx->restrictions.register_op))
+ return -EACCES;
+ }
+
+ switch (opcode) {
+ case IORING_REGISTER_BUFFERS:
+ ret = -EFAULT;
+ if (!arg)
+ break;
+ ret = io_sqe_buffers_register(ctx, arg, nr_args, NULL);
+ break;
+ case IORING_UNREGISTER_BUFFERS:
+ ret = -EINVAL;
+ if (arg || nr_args)
+ break;
+ ret = io_sqe_buffers_unregister(ctx);
+ break;
+ case IORING_REGISTER_FILES:
+ ret = -EFAULT;
+ if (!arg)
+ break;
+ ret = io_sqe_files_register(ctx, arg, nr_args, NULL);
+ break;
+ case IORING_UNREGISTER_FILES:
+ ret = -EINVAL;
+ if (arg || nr_args)
+ break;
+ ret = io_sqe_files_unregister(ctx);
+ break;
+ case IORING_REGISTER_FILES_UPDATE:
+ ret = io_register_files_update(ctx, arg, nr_args);
+ break;
+ case IORING_REGISTER_EVENTFD:
+ ret = -EINVAL;
+ if (nr_args != 1)
+ break;
+ ret = io_eventfd_register(ctx, arg, 0);
+ break;
+ case IORING_REGISTER_EVENTFD_ASYNC:
+ ret = -EINVAL;
+ if (nr_args != 1)
+ break;
+ ret = io_eventfd_register(ctx, arg, 1);
+ break;
+ case IORING_UNREGISTER_EVENTFD:
+ ret = -EINVAL;
+ if (arg || nr_args)
+ break;
+ ret = io_eventfd_unregister(ctx);
+ break;
+ case IORING_REGISTER_PROBE:
+ ret = -EINVAL;
+ if (!arg || nr_args > 256)
+ break;
+ ret = io_probe(ctx, arg, nr_args);
+ break;
+ case IORING_REGISTER_PERSONALITY:
+ ret = -EINVAL;
+ if (arg || nr_args)
+ break;
+ ret = io_register_personality(ctx);
+ break;
+ case IORING_UNREGISTER_PERSONALITY:
+ ret = -EINVAL;
+ if (arg)
+ break;
+ ret = io_unregister_personality(ctx, nr_args);
+ break;
+ case IORING_REGISTER_ENABLE_RINGS:
+ ret = -EINVAL;
+ if (arg || nr_args)
+ break;
+ ret = io_register_enable_rings(ctx);
+ break;
+ case IORING_REGISTER_RESTRICTIONS:
+ ret = io_register_restrictions(ctx, arg, nr_args);
+ break;
+ case IORING_REGISTER_FILES2:
+ ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_FILE);
+ break;
+ case IORING_REGISTER_FILES_UPDATE2:
+ ret = io_register_rsrc_update(ctx, arg, nr_args,
+ IORING_RSRC_FILE);
+ break;
+ case IORING_REGISTER_BUFFERS2:
+ ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_BUFFER);
+ break;
+ case IORING_REGISTER_BUFFERS_UPDATE:
+ ret = io_register_rsrc_update(ctx, arg, nr_args,
+ IORING_RSRC_BUFFER);
+ break;
+ case IORING_REGISTER_IOWQ_AFF:
+ ret = -EINVAL;
+ if (!arg || !nr_args)
+ break;
+ ret = io_register_iowq_aff(ctx, arg, nr_args);
+ break;
+ case IORING_UNREGISTER_IOWQ_AFF:
+ ret = -EINVAL;
+ if (arg || nr_args)
+ break;
+ ret = io_unregister_iowq_aff(ctx);
+ break;
+ case IORING_REGISTER_IOWQ_MAX_WORKERS:
+ ret = -EINVAL;
+ if (!arg || nr_args != 2)
+ break;
+ ret = io_register_iowq_max_workers(ctx, arg);
+ break;
+ case IORING_REGISTER_RING_FDS:
+ ret = io_ringfd_register(ctx, arg, nr_args);
+ break;
+ case IORING_UNREGISTER_RING_FDS:
+ ret = io_ringfd_unregister(ctx, arg, nr_args);
+ break;
+ case IORING_REGISTER_PBUF_RING:
+ ret = -EINVAL;
+ if (!arg || nr_args != 1)
+ break;
+ ret = io_register_pbuf_ring(ctx, arg);
+ break;
+ case IORING_UNREGISTER_PBUF_RING:
+ ret = -EINVAL;
+ if (!arg || nr_args != 1)
+ break;
+ ret = io_unregister_pbuf_ring(ctx, arg);
+ break;
+ case IORING_REGISTER_SYNC_CANCEL:
+ ret = -EINVAL;
+ if (!arg || nr_args != 1)
+ break;
+ ret = io_sync_cancel(ctx, arg);
+ break;
+ case IORING_REGISTER_FILE_ALLOC_RANGE:
+ ret = -EINVAL;
+ if (!arg || nr_args)
+ break;
+ ret = io_register_file_alloc_range(ctx, arg);
+ break;
+ case IORING_REGISTER_PBUF_STATUS:
+ ret = -EINVAL;
+ if (!arg || nr_args != 1)
+ break;
+ ret = io_register_pbuf_status(ctx, arg);
+ break;
+ case IORING_REGISTER_NAPI:
+ ret = -EINVAL;
+ if (!arg || nr_args != 1)
+ break;
+ ret = io_register_napi(ctx, arg);
+ break;
+ case IORING_UNREGISTER_NAPI:
+ ret = -EINVAL;
+ if (nr_args != 1)
+ break;
+ ret = io_unregister_napi(ctx, arg);
+ break;
+ case IORING_REGISTER_CLOCK:
+ ret = -EINVAL;
+ if (!arg || nr_args)
+ break;
+ ret = io_register_clock(ctx, arg);
+ break;
+ case IORING_REGISTER_CLONE_BUFFERS:
+ ret = -EINVAL;
+ if (!arg || nr_args != 1)
+ break;
+ ret = io_register_clone_buffers(ctx, arg);
+ break;
+ case IORING_REGISTER_ZCRX_IFQ:
+ ret = -EINVAL;
+ if (!arg || nr_args != 1)
+ break;
+ ret = io_register_zcrx_ifq(ctx, arg);
+ break;
+ case IORING_REGISTER_RESIZE_RINGS:
+ ret = -EINVAL;
+ if (!arg || nr_args != 1)
+ break;
+ ret = io_register_resize_rings(ctx, arg);
+ break;
+ case IORING_REGISTER_MEM_REGION:
+ ret = -EINVAL;
+ if (!arg || nr_args != 1)
+ break;
+ ret = io_register_mem_region(ctx, arg);
+ break;
+ case IORING_REGISTER_QUERY:
+ ret = io_query(arg, nr_args);
+ break;
+ case IORING_REGISTER_ZCRX_CTRL:
+ ret = io_zcrx_ctrl(ctx, arg, nr_args);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Given an 'fd' value, return the ctx associated with it. If 'registered' is
+ * true, then the registered index is used. Otherwise, the normal fd table.
+ * Caller must call fput() on the returned file, unless it's an ERR_PTR.
+ */
+struct file *io_uring_register_get_file(unsigned int fd, bool registered)
+{
+ struct file *file;
+
+ if (registered) {
+ /*
+ * Ring fd has been registered via IORING_REGISTER_RING_FDS, we
+ * need only dereference our task private array to find it.
+ */
+ struct io_uring_task *tctx = current->io_uring;
+
+ if (unlikely(!tctx || fd >= IO_RINGFD_REG_MAX))
+ return ERR_PTR(-EINVAL);
+ fd = array_index_nospec(fd, IO_RINGFD_REG_MAX);
+ file = tctx->registered_rings[fd];
+ if (file)
+ get_file(file);
+ } else {
+ file = fget(fd);
+ }
+
+ if (unlikely(!file))
+ return ERR_PTR(-EBADF);
+ if (io_is_uring_fops(file))
+ return file;
+ fput(file);
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static int io_uring_register_send_msg_ring(void __user *arg, unsigned int nr_args)
+{
+ struct io_uring_sqe sqe;
+
+ if (!arg || nr_args != 1)
+ return -EINVAL;
+ if (copy_from_user(&sqe, arg, sizeof(sqe)))
+ return -EFAULT;
+ /* no flags supported */
+ if (sqe.flags)
+ return -EINVAL;
+ if (sqe.opcode != IORING_OP_MSG_RING)
+ return -EINVAL;
+
+ return io_uring_sync_msg_ring(&sqe);
+}
+
+/*
+ * "blind" registration opcodes are ones where there's no ring given, and
+ * hence the source fd must be -1.
+ */
+static int io_uring_register_blind(unsigned int opcode, void __user *arg,
+ unsigned int nr_args)
+{
+ switch (opcode) {
+ case IORING_REGISTER_SEND_MSG_RING:
+ return io_uring_register_send_msg_ring(arg, nr_args);
+ case IORING_REGISTER_QUERY:
+ return io_query(arg, nr_args);
+ }
+ return -EINVAL;
+}
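Usage note (not part of the patch): the blind SEND_MSG_RING form lets a task without any ring of its own post a CQE into someone else's ring; the argument is a single SQE with opcode IORING_OP_MSG_RING and no flags, as validated above. A hedged userspace sketch following the usual MSG_RING convention (fd = target ring, len becomes cqe->res, off becomes cqe->user_data):

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

static int poke_other_ring(int target_ring_fd)
{
	struct io_uring_sqe sqe;

	memset(&sqe, 0, sizeof(sqe));	/* sqe.flags must stay 0 */
	sqe.opcode = IORING_OP_MSG_RING;
	sqe.fd = target_ring_fd;	/* ring that receives the CQE */
	sqe.len = 0;			/* becomes cqe->res */
	sqe.off = 0x1234;		/* becomes cqe->user_data */

	/* source fd is -1, so this takes the "blind" path above */
	return syscall(__NR_io_uring_register, -1,
		       IORING_REGISTER_SEND_MSG_RING, &sqe, 1);
}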
+
+SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
+ void __user *, arg, unsigned int, nr_args)
+{
+ struct io_ring_ctx *ctx;
+ long ret = -EBADF;
+ struct file *file;
+ bool use_registered_ring;
+
+ use_registered_ring = !!(opcode & IORING_REGISTER_USE_REGISTERED_RING);
+ opcode &= ~IORING_REGISTER_USE_REGISTERED_RING;
+
+ if (opcode >= IORING_REGISTER_LAST)
+ return -EINVAL;
+
+ if (fd == -1)
+ return io_uring_register_blind(opcode, arg, nr_args);
+
+ file = io_uring_register_get_file(fd, use_registered_ring);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+ ctx = file->private_data;
+
+ mutex_lock(&ctx->uring_lock);
+ ret = __io_uring_register(ctx, opcode, arg, nr_args);
+
+ trace_io_uring_register(ctx, opcode, ctx->file_table.data.nr,
+ ctx->buf_table.nr, ret);
+ mutex_unlock(&ctx->uring_lock);
+
+ fput(file);
+ return ret;
+}
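Usage note (not part of the patch): once a ring fd has been stashed in the task-private table via RING_FDS, later register calls can avoid the fdtable lookup by OR-ing IORING_REGISTER_USE_REGISTERED_RING into the opcode and passing the registered index as the fd, which is exactly what the syscall entry above decodes. The io_uring_rsrc_update usage below mirrors how liburing drives RING_FDS; treat it as an assumption rather than a spec.

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

static int enable_via_registered_index(int ring_fd)
{
	struct io_uring_rsrc_update upd;
	int ret;

	memset(&upd, 0, sizeof(upd));
	upd.offset = -1U;	/* let the kernel pick a free slot */
	upd.data = ring_fd;

	ret = syscall(__NR_io_uring_register, ring_fd,
		      IORING_REGISTER_RING_FDS, &upd, 1);
	if (ret != 1)
		return -1;

	/* upd.offset now holds the registered index; any opcode works here,
	 * ENABLE_RINGS just assumes the ring was created R_DISABLED */
	return syscall(__NR_io_uring_register, upd.offset,
		       IORING_REGISTER_ENABLE_RINGS |
		       IORING_REGISTER_USE_REGISTERED_RING, NULL, 0);
}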
diff --git a/io_uring/register.h b/io_uring/register.h
new file mode 100644
index 000000000000..a5f39d5ef9e0
--- /dev/null
+++ b/io_uring/register.h
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef IORING_REGISTER_H
+#define IORING_REGISTER_H
+
+int io_eventfd_unregister(struct io_ring_ctx *ctx);
+int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id);
+struct file *io_uring_register_get_file(unsigned int fd, bool registered);
+
+#endif
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 18de10c68a15..a63474b331bf 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -9,12 +9,16 @@
#include <linux/hugetlb.h>
#include <linux/compat.h>
#include <linux/io_uring.h>
+#include <linux/io_uring/cmd.h>
#include <uapi/linux/io_uring.h>
+#include "filetable.h"
#include "io_uring.h"
#include "openclose.h"
#include "rsrc.h"
+#include "memmap.h"
+#include "register.h"
struct io_rsrc_update {
struct file *file;
@@ -23,24 +27,14 @@ struct io_rsrc_update {
u32 offset;
};
-static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
- struct io_mapped_ubuf **pimu,
- struct page **last_hpage);
-
-#define IO_RSRC_REF_BATCH 100
+static struct io_rsrc_node *io_sqe_buffer_register(struct io_ring_ctx *ctx,
+ struct iovec *iov, struct page **last_hpage);
/* only define max */
#define IORING_MAX_FIXED_FILES (1U << 20)
#define IORING_MAX_REG_BUFFERS (1U << 14)
-void io_rsrc_refs_drop(struct io_ring_ctx *ctx)
- __must_hold(&ctx->uring_lock)
-{
- if (ctx->rsrc_cached_refs) {
- io_rsrc_put_node(ctx->rsrc_node, ctx->rsrc_cached_refs);
- ctx->rsrc_cached_refs = 0;
- }
-}
+#define IO_CACHED_BVECS_SEGS 32
int __io_account_mem(struct user_struct *user, unsigned long nr_pages)
{
@@ -62,60 +56,48 @@ int __io_account_mem(struct user_struct *user, unsigned long nr_pages)
return 0;
}
-static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
+void io_unaccount_mem(struct user_struct *user, struct mm_struct *mm_account,
+ unsigned long nr_pages)
{
- if (ctx->user)
- __io_unaccount_mem(ctx->user, nr_pages);
+ if (user)
+ __io_unaccount_mem(user, nr_pages);
- if (ctx->mm_account)
- atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
+ if (mm_account)
+ atomic64_sub(nr_pages, &mm_account->pinned_vm);
}
-static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
+int io_account_mem(struct user_struct *user, struct mm_struct *mm_account,
+ unsigned long nr_pages)
{
int ret;
- if (ctx->user) {
- ret = __io_account_mem(ctx->user, nr_pages);
+ if (user) {
+ ret = __io_account_mem(user, nr_pages);
if (ret)
return ret;
}
- if (ctx->mm_account)
- atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);
+ if (mm_account)
+ atomic64_add(nr_pages, &mm_account->pinned_vm);
return 0;
}
-static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
- void __user *arg, unsigned index)
+int io_validate_user_buf_range(u64 uaddr, u64 ulen)
{
- struct iovec __user *src;
-
-#ifdef CONFIG_COMPAT
- if (ctx->compat) {
- struct compat_iovec __user *ciovs;
- struct compat_iovec ciov;
+ unsigned long tmp, base = (unsigned long)uaddr;
+ unsigned long acct_len = (unsigned long)PAGE_ALIGN(ulen);
- ciovs = (struct compat_iovec __user *) arg;
- if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
- return -EFAULT;
-
- dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
- dst->iov_len = ciov.iov_len;
- return 0;
- }
-#endif
- src = (struct iovec __user *) arg;
- if (copy_from_user(dst, &src[index], sizeof(*dst)))
+ /* arbitrary limit, but we need something */
+ if (ulen > SZ_1G || !ulen)
return -EFAULT;
+ if (check_add_overflow(base, acct_len, &tmp))
+ return -EOVERFLOW;
return 0;
}
static int io_buffer_validate(struct iovec *iov)
{
- unsigned long tmp, acct_len = iov->iov_len + (PAGE_SIZE - 1);
-
/*
* Don't impose further limits on the size and buffer
* constraints here, we'll -EINVAL later when IO is
@@ -123,326 +105,122 @@ static int io_buffer_validate(struct iovec *iov)
*/
if (!iov->iov_base)
return iov->iov_len ? -EFAULT : 0;
- if (!iov->iov_len)
- return -EFAULT;
- /* arbitrary limit, but we need something */
- if (iov->iov_len > SZ_1G)
- return -EFAULT;
-
- if (check_add_overflow((unsigned long)iov->iov_base, acct_len, &tmp))
- return -EOVERFLOW;
-
- return 0;
+ return io_validate_user_buf_range((unsigned long)iov->iov_base,
+ iov->iov_len);
}
-static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slot)
+static void io_release_ubuf(void *priv)
{
- struct io_mapped_ubuf *imu = *slot;
+ struct io_mapped_ubuf *imu = priv;
unsigned int i;
- if (imu != ctx->dummy_ubuf) {
- for (i = 0; i < imu->nr_bvecs; i++)
- unpin_user_page(imu->bvec[i].bv_page);
- if (imu->acct_pages)
- io_unaccount_mem(ctx, imu->acct_pages);
- kvfree(imu);
- }
- *slot = NULL;
-}
-
-void io_rsrc_refs_refill(struct io_ring_ctx *ctx)
- __must_hold(&ctx->uring_lock)
-{
- ctx->rsrc_cached_refs += IO_RSRC_REF_BATCH;
- percpu_ref_get_many(&ctx->rsrc_node->refs, IO_RSRC_REF_BATCH);
-}
-
-static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
-{
- struct io_rsrc_data *rsrc_data = ref_node->rsrc_data;
- struct io_ring_ctx *ctx = rsrc_data->ctx;
- struct io_rsrc_put *prsrc, *tmp;
-
- list_for_each_entry_safe(prsrc, tmp, &ref_node->rsrc_list, list) {
- list_del(&prsrc->list);
-
- if (prsrc->tag) {
- if (ctx->flags & IORING_SETUP_IOPOLL) {
- mutex_lock(&ctx->uring_lock);
- io_post_aux_cqe(ctx, prsrc->tag, 0, 0);
- mutex_unlock(&ctx->uring_lock);
- } else {
- io_post_aux_cqe(ctx, prsrc->tag, 0, 0);
- }
- }
-
- rsrc_data->do_put(ctx, prsrc);
- kfree(prsrc);
- }
-
- io_rsrc_node_destroy(ref_node);
- if (atomic_dec_and_test(&rsrc_data->refs))
- complete(&rsrc_data->done);
-}
-
-void io_rsrc_put_work(struct work_struct *work)
-{
- struct io_ring_ctx *ctx;
- struct llist_node *node;
-
- ctx = container_of(work, struct io_ring_ctx, rsrc_put_work.work);
- node = llist_del_all(&ctx->rsrc_put_llist);
+ for (i = 0; i < imu->nr_bvecs; i++) {
+ struct folio *folio = page_folio(imu->bvec[i].bv_page);
- while (node) {
- struct io_rsrc_node *ref_node;
- struct llist_node *next = node->next;
-
- ref_node = llist_entry(node, struct io_rsrc_node, llist);
- __io_rsrc_put_work(ref_node);
- node = next;
+ unpin_user_folio(folio, 1);
}
}
-void io_rsrc_put_tw(struct callback_head *cb)
+static struct io_mapped_ubuf *io_alloc_imu(struct io_ring_ctx *ctx,
+ int nr_bvecs)
{
- struct io_ring_ctx *ctx = container_of(cb, struct io_ring_ctx,
- rsrc_put_tw);
-
- io_rsrc_put_work(&ctx->rsrc_put_work.work);
+ if (nr_bvecs <= IO_CACHED_BVECS_SEGS)
+ return io_cache_alloc(&ctx->imu_cache, GFP_KERNEL);
+ return kvmalloc(struct_size_t(struct io_mapped_ubuf, bvec, nr_bvecs),
+ GFP_KERNEL);
}
-void io_wait_rsrc_data(struct io_rsrc_data *data)
+static void io_free_imu(struct io_ring_ctx *ctx, struct io_mapped_ubuf *imu)
{
- if (data && !atomic_dec_and_test(&data->refs))
- wait_for_completion(&data->done);
-}
-
-void io_rsrc_node_destroy(struct io_rsrc_node *ref_node)
-{
- percpu_ref_exit(&ref_node->refs);
- kfree(ref_node);
+ if (imu->nr_bvecs <= IO_CACHED_BVECS_SEGS)
+ io_cache_free(&ctx->imu_cache, imu);
+ else
+ kvfree(imu);
}
-static __cold void io_rsrc_node_ref_zero(struct percpu_ref *ref)
+static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf *imu)
{
- struct io_rsrc_node *node = container_of(ref, struct io_rsrc_node, refs);
- struct io_ring_ctx *ctx = node->rsrc_data->ctx;
- unsigned long flags;
- bool first_add = false;
- unsigned long delay = HZ;
-
- spin_lock_irqsave(&ctx->rsrc_ref_lock, flags);
- node->done = true;
-
- /* if we are mid-quiesce then do not delay */
- if (node->rsrc_data->quiesce)
- delay = 0;
-
- while (!list_empty(&ctx->rsrc_ref_list)) {
- node = list_first_entry(&ctx->rsrc_ref_list,
- struct io_rsrc_node, node);
- /* recycle ref nodes in order */
- if (!node->done)
- break;
- list_del(&node->node);
- first_add |= llist_add(&node->llist, &ctx->rsrc_put_llist);
- }
- spin_unlock_irqrestore(&ctx->rsrc_ref_lock, flags);
-
- if (!first_add)
- return;
-
- if (ctx->submitter_task) {
- if (!task_work_add(ctx->submitter_task, &ctx->rsrc_put_tw,
- ctx->notify_method))
+ if (unlikely(refcount_read(&imu->refs) > 1)) {
+ if (!refcount_dec_and_test(&imu->refs))
return;
}
- mod_delayed_work(system_wq, &ctx->rsrc_put_work, delay);
-}
-static struct io_rsrc_node *io_rsrc_node_alloc(void)
-{
- struct io_rsrc_node *ref_node;
-
- ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
- if (!ref_node)
- return NULL;
-
- if (percpu_ref_init(&ref_node->refs, io_rsrc_node_ref_zero,
- 0, GFP_KERNEL)) {
- kfree(ref_node);
- return NULL;
- }
- INIT_LIST_HEAD(&ref_node->node);
- INIT_LIST_HEAD(&ref_node->rsrc_list);
- ref_node->done = false;
- return ref_node;
+ if (imu->acct_pages)
+ io_unaccount_mem(ctx->user, ctx->mm_account, imu->acct_pages);
+ imu->release(imu->priv);
+ io_free_imu(ctx, imu);
}
-void io_rsrc_node_switch(struct io_ring_ctx *ctx,
- struct io_rsrc_data *data_to_kill)
- __must_hold(&ctx->uring_lock)
+struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx, int type)
{
- WARN_ON_ONCE(!ctx->rsrc_backup_node);
- WARN_ON_ONCE(data_to_kill && !ctx->rsrc_node);
-
- io_rsrc_refs_drop(ctx);
-
- if (data_to_kill) {
- struct io_rsrc_node *rsrc_node = ctx->rsrc_node;
-
- rsrc_node->rsrc_data = data_to_kill;
- spin_lock_irq(&ctx->rsrc_ref_lock);
- list_add_tail(&rsrc_node->node, &ctx->rsrc_ref_list);
- spin_unlock_irq(&ctx->rsrc_ref_lock);
-
- atomic_inc(&data_to_kill->refs);
- percpu_ref_kill(&rsrc_node->refs);
- ctx->rsrc_node = NULL;
- }
-
- if (!ctx->rsrc_node) {
- ctx->rsrc_node = ctx->rsrc_backup_node;
- ctx->rsrc_backup_node = NULL;
+ struct io_rsrc_node *node;
+
+ node = io_cache_alloc(&ctx->node_cache, GFP_KERNEL);
+ if (node) {
+ node->type = type;
+ node->refs = 1;
+ node->tag = 0;
+ node->file_ptr = 0;
}
+ return node;
}
-int io_rsrc_node_switch_start(struct io_ring_ctx *ctx)
-{
- if (ctx->rsrc_backup_node)
- return 0;
- ctx->rsrc_backup_node = io_rsrc_node_alloc();
- return ctx->rsrc_backup_node ? 0 : -ENOMEM;
-}
-
-__cold static int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
- struct io_ring_ctx *ctx)
+bool io_rsrc_cache_init(struct io_ring_ctx *ctx)
{
- int ret;
-
- /* As we may drop ->uring_lock, other task may have started quiesce */
- if (data->quiesce)
- return -ENXIO;
- ret = io_rsrc_node_switch_start(ctx);
- if (ret)
- return ret;
- io_rsrc_node_switch(ctx, data);
-
- /* kill initial ref, already quiesced if zero */
- if (atomic_dec_and_test(&data->refs))
- return 0;
-
- data->quiesce = true;
- mutex_unlock(&ctx->uring_lock);
- do {
- ret = io_run_task_work_sig(ctx);
- if (ret < 0) {
- atomic_inc(&data->refs);
- /* wait for all works potentially completing data->done */
- flush_delayed_work(&ctx->rsrc_put_work);
- reinit_completion(&data->done);
- mutex_lock(&ctx->uring_lock);
- break;
- }
-
- flush_delayed_work(&ctx->rsrc_put_work);
- ret = wait_for_completion_interruptible(&data->done);
- if (!ret) {
- mutex_lock(&ctx->uring_lock);
- if (atomic_read(&data->refs) <= 0)
- break;
- /*
- * it has been revived by another thread while
- * we were unlocked
- */
- mutex_unlock(&ctx->uring_lock);
- }
- } while (1);
- data->quiesce = false;
-
+ const int imu_cache_size = struct_size_t(struct io_mapped_ubuf, bvec,
+ IO_CACHED_BVECS_SEGS);
+ const int node_size = sizeof(struct io_rsrc_node);
+ bool ret;
+
+ ret = io_alloc_cache_init(&ctx->node_cache, IO_ALLOC_CACHE_MAX,
+ node_size, 0);
+ ret |= io_alloc_cache_init(&ctx->imu_cache, IO_ALLOC_CACHE_MAX,
+ imu_cache_size, 0);
return ret;
}
-static void io_free_page_table(void **table, size_t size)
+void io_rsrc_cache_free(struct io_ring_ctx *ctx)
{
- unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
-
- for (i = 0; i < nr_tables; i++)
- kfree(table[i]);
- kfree(table);
+ io_alloc_cache_free(&ctx->node_cache, kfree);
+ io_alloc_cache_free(&ctx->imu_cache, kfree);
}
-static void io_rsrc_data_free(struct io_rsrc_data *data)
+static void io_clear_table_tags(struct io_rsrc_data *data)
{
- size_t size = data->nr * sizeof(data->tags[0][0]);
-
- if (data->tags)
- io_free_page_table((void **)data->tags, size);
- kfree(data);
-}
-
-static __cold void **io_alloc_page_table(size_t size)
-{
- unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
- size_t init_size = size;
- void **table;
-
- table = kcalloc(nr_tables, sizeof(*table), GFP_KERNEL_ACCOUNT);
- if (!table)
- return NULL;
+ int i;
- for (i = 0; i < nr_tables; i++) {
- unsigned int this_size = min_t(size_t, size, PAGE_SIZE);
+ for (i = 0; i < data->nr; i++) {
+ struct io_rsrc_node *node = data->nodes[i];
- table[i] = kzalloc(this_size, GFP_KERNEL_ACCOUNT);
- if (!table[i]) {
- io_free_page_table(table, init_size);
- return NULL;
- }
- size -= this_size;
+ if (node)
+ node->tag = 0;
}
- return table;
}
-__cold static int io_rsrc_data_alloc(struct io_ring_ctx *ctx,
- rsrc_put_fn *do_put, u64 __user *utags,
- unsigned nr, struct io_rsrc_data **pdata)
+__cold void io_rsrc_data_free(struct io_ring_ctx *ctx,
+ struct io_rsrc_data *data)
{
- struct io_rsrc_data *data;
- int ret = -ENOMEM;
- unsigned i;
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
- data->tags = (u64 **)io_alloc_page_table(nr * sizeof(data->tags[0][0]));
- if (!data->tags) {
- kfree(data);
- return -ENOMEM;
+ if (!data->nr)
+ return;
+ while (data->nr--) {
+ if (data->nodes[data->nr])
+ io_put_rsrc_node(ctx, data->nodes[data->nr]);
}
+ kvfree(data->nodes);
+ data->nodes = NULL;
+ data->nr = 0;
+}
- data->nr = nr;
- data->ctx = ctx;
- data->do_put = do_put;
- if (utags) {
- ret = -EFAULT;
- for (i = 0; i < nr; i++) {
- u64 *tag_slot = io_get_tag_slot(data, i);
-
- if (copy_from_user(tag_slot, &utags[i],
- sizeof(*tag_slot)))
- goto fail;
- }
+__cold int io_rsrc_data_alloc(struct io_rsrc_data *data, unsigned nr)
+{
+ data->nodes = kvmalloc_array(nr, sizeof(struct io_rsrc_node *),
+ GFP_KERNEL_ACCOUNT | __GFP_ZERO);
+ if (data->nodes) {
+ data->nr = nr;
+ return 0;
}
-
- atomic_set(&data->refs, 1);
- init_completion(&data->done);
- *pdata = data;
- return 0;
-fail:
- io_rsrc_data_free(data);
- return ret;
+ return -ENOMEM;
}
static int __io_sqe_files_update(struct io_ring_ctx *ctx,
@@ -451,16 +229,12 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
{
u64 __user *tags = u64_to_user_ptr(up->tags);
__s32 __user *fds = u64_to_user_ptr(up->data);
- struct io_rsrc_data *data = ctx->file_data;
- struct io_fixed_file *file_slot;
- struct file *file;
int fd, i, err = 0;
unsigned int done;
- bool needs_switch = false;
- if (!ctx->file_data)
+ if (!ctx->file_table.data.nr)
return -ENXIO;
- if (up->offset + nr_args > ctx->nr_user_files)
+ if (up->offset + nr_args > ctx->file_table.data.nr)
return -EINVAL;
for (done = 0; done < nr_args; done++) {
@@ -478,50 +252,39 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
if (fd == IORING_REGISTER_FILES_SKIP)
continue;
- i = array_index_nospec(up->offset + done, ctx->nr_user_files);
- file_slot = io_fixed_file_slot(&ctx->file_table, i);
-
- if (file_slot->file_ptr) {
- file = (struct file *)(file_slot->file_ptr & FFS_MASK);
- err = io_queue_rsrc_removal(data, i, ctx->rsrc_node, file);
- if (err)
- break;
- file_slot->file_ptr = 0;
+ i = up->offset + done;
+ if (io_reset_rsrc_node(ctx, &ctx->file_table.data, i))
io_file_bitmap_clear(&ctx->file_table, i);
- needs_switch = true;
- }
+
if (fd != -1) {
- file = fget(fd);
+ struct file *file = fget(fd);
+ struct io_rsrc_node *node;
+
if (!file) {
err = -EBADF;
break;
}
/*
- * Don't allow io_uring instances to be registered. If
- * UNIX isn't enabled, then this causes a reference
- * cycle and this instance can never get freed. If UNIX
- * is enabled we'll handle it just fine, but there's
- * still no point in allowing a ring fd as it doesn't
- * support regular read/write anyway.
+ * Don't allow io_uring instances to be registered.
*/
if (io_is_uring_fops(file)) {
fput(file);
err = -EBADF;
break;
}
- err = io_scm_file_account(ctx, file);
- if (err) {
+ node = io_rsrc_node_alloc(ctx, IORING_RSRC_FILE);
+ if (!node) {
+ err = -ENOMEM;
fput(file);
break;
}
- *io_get_tag_slot(data, i) = tag;
- io_fixed_file_set(file_slot, file);
+ ctx->file_table.data.nodes[i] = node;
+ if (tag)
+ node->tag = tag;
+ io_fixed_file_set(node, file);
io_file_bitmap_set(&ctx->file_table, i);
}
}
-
- if (needs_switch)
- io_rsrc_node_switch(ctx, data);
return done ? done : err;
}
@@ -530,58 +293,55 @@ static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
unsigned int nr_args)
{
u64 __user *tags = u64_to_user_ptr(up->tags);
- struct iovec iov, __user *iovs = u64_to_user_ptr(up->data);
+ struct iovec fast_iov, *iov;
struct page *last_hpage = NULL;
- bool needs_switch = false;
+ struct iovec __user *uvec;
+ u64 user_data = up->data;
__u32 done;
int i, err;
- if (!ctx->buf_data)
+ if (!ctx->buf_table.nr)
return -ENXIO;
- if (up->offset + nr_args > ctx->nr_user_bufs)
+ if (up->offset + nr_args > ctx->buf_table.nr)
return -EINVAL;
for (done = 0; done < nr_args; done++) {
- struct io_mapped_ubuf *imu;
- int offset = up->offset + done;
+ struct io_rsrc_node *node;
u64 tag = 0;
- err = io_copy_iov(ctx, &iov, iovs, done);
- if (err)
+ uvec = u64_to_user_ptr(user_data);
+ iov = iovec_from_user(uvec, 1, 1, &fast_iov, ctx->compat);
+ if (IS_ERR(iov)) {
+ err = PTR_ERR(iov);
break;
+ }
if (tags && copy_from_user(&tag, &tags[done], sizeof(tag))) {
err = -EFAULT;
break;
}
- err = io_buffer_validate(&iov);
+ err = io_buffer_validate(iov);
if (err)
break;
- if (!iov.iov_base && tag) {
- err = -EINVAL;
+ node = io_sqe_buffer_register(ctx, iov, &last_hpage);
+ if (IS_ERR(node)) {
+ err = PTR_ERR(node);
break;
}
- err = io_sqe_buffer_register(ctx, &iov, &imu, &last_hpage);
- if (err)
- break;
-
- i = array_index_nospec(offset, ctx->nr_user_bufs);
- if (ctx->user_bufs[i] != ctx->dummy_ubuf) {
- err = io_queue_rsrc_removal(ctx->buf_data, i,
- ctx->rsrc_node, ctx->user_bufs[i]);
- if (unlikely(err)) {
- io_buffer_unmap(ctx, &imu);
+ if (tag) {
+ if (!node) {
+ err = -EINVAL;
break;
}
- ctx->user_bufs[i] = ctx->dummy_ubuf;
- needs_switch = true;
+ node->tag = tag;
}
-
- ctx->user_bufs[i] = imu;
- *io_get_tag_slot(ctx->buf_data, offset) = tag;
+ i = array_index_nospec(up->offset + done, ctx->buf_table.nr);
+ io_reset_rsrc_node(ctx, &ctx->buf_table, i);
+ ctx->buf_table.nodes[i] = node;
+ if (ctx->compat)
+ user_data += sizeof(struct compat_iovec);
+ else
+ user_data += sizeof(struct iovec);
}
-
- if (needs_switch)
- io_rsrc_node_switch(ctx, ctx->buf_data);
return done ? done : err;
}
@@ -590,13 +350,11 @@ static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
unsigned nr_args)
{
__u32 tmp;
- int err;
+
+ lockdep_assert_held(&ctx->uring_lock);
if (check_add_overflow(up->offset, nr_args, &tmp))
return -EOVERFLOW;
- err = io_rsrc_node_switch_start(ctx);
- if (err)
- return err;
switch (type) {
case IORING_RSRC_FILE:
@@ -694,11 +452,11 @@ static int io_files_update_with_index_alloc(struct io_kiocb *req,
struct file *file;
int ret, fd;
- if (!req->ctx->file_data)
+ if (!req->ctx->file_table.data.nr)
return -ENXIO;
for (done = 0; done < up->nr_args; done++) {
- if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
+ if (get_user(fd, &fds[done])) {
ret = -EFAULT;
break;
}
@@ -712,7 +470,7 @@ static int io_files_update_with_index_alloc(struct io_kiocb *req,
IORING_FILE_INDEX_ALLOC);
if (ret < 0)
break;
- if (copy_to_user(&fds[done], &ret, sizeof(ret))) {
+ if (put_user(ret, &fds[done])) {
__io_close_fixed(req->ctx, issue_flags, ret);
ret = -EFAULT;
break;
@@ -750,205 +508,39 @@ int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
if (ret < 0)
req_set_fail(req);
io_req_set_res(req, ret, 0);
- return IOU_OK;
+ return IOU_COMPLETE;
}
-int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
- struct io_rsrc_node *node, void *rsrc)
+void io_free_rsrc_node(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
{
- u64 *tag_slot = io_get_tag_slot(data, idx);
- struct io_rsrc_put *prsrc;
-
- prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
- if (!prsrc)
- return -ENOMEM;
-
- prsrc->tag = *tag_slot;
- *tag_slot = 0;
- prsrc->rsrc = rsrc;
- list_add(&prsrc->list, &node->rsrc_list);
- return 0;
-}
+ if (node->tag)
+ io_post_aux_cqe(ctx, node->tag, 0, 0);
-void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
-{
- int i;
-
- for (i = 0; i < ctx->nr_user_files; i++) {
- struct file *file = io_file_from_index(&ctx->file_table, i);
-
- /* skip scm accounted files, they'll be freed by ->ring_sock */
- if (!file || io_file_need_scm(file))
- continue;
- io_file_bitmap_clear(&ctx->file_table, i);
- fput(file);
+ switch (node->type) {
+ case IORING_RSRC_FILE:
+ fput(io_slot_file(node));
+ break;
+ case IORING_RSRC_BUFFER:
+ io_buffer_unmap(ctx, node->buf);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ break;
}
-#if defined(CONFIG_UNIX)
- if (ctx->ring_sock) {
- struct sock *sock = ctx->ring_sock->sk;
- struct sk_buff *skb;
-
- while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
- kfree_skb(skb);
- }
-#endif
- io_free_file_tables(&ctx->file_table);
- io_rsrc_data_free(ctx->file_data);
- ctx->file_data = NULL;
- ctx->nr_user_files = 0;
+ io_cache_free(&ctx->node_cache, node);
}
int io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
- unsigned nr = ctx->nr_user_files;
- int ret;
-
- if (!ctx->file_data)
+ if (!ctx->file_table.data.nr)
return -ENXIO;
- /*
- * Quiesce may unlock ->uring_lock, and while it's not held
- * prevent new requests using the table.
- */
- ctx->nr_user_files = 0;
- ret = io_rsrc_ref_quiesce(ctx->file_data, ctx);
- ctx->nr_user_files = nr;
- if (!ret)
- __io_sqe_files_unregister(ctx);
- return ret;
-}
-
-/*
- * Ensure the UNIX gc is aware of our file set, so we are certain that
- * the io_uring can be safely unregistered on process exit, even if we have
- * loops in the file referencing. We account only files that can hold other
- * files because otherwise they can't form a loop and so are not interesting
- * for GC.
- */
-int __io_scm_file_account(struct io_ring_ctx *ctx, struct file *file)
-{
-#if defined(CONFIG_UNIX)
- struct sock *sk = ctx->ring_sock->sk;
- struct sk_buff_head *head = &sk->sk_receive_queue;
- struct scm_fp_list *fpl;
- struct sk_buff *skb;
-
- if (likely(!io_file_need_scm(file)))
- return 0;
-
- /*
- * See if we can merge this file into an existing skb SCM_RIGHTS
- * file set. If there's no room, fall back to allocating a new skb
- * and filling it in.
- */
- spin_lock_irq(&head->lock);
- skb = skb_peek(head);
- if (skb && UNIXCB(skb).fp->count < SCM_MAX_FD)
- __skb_unlink(skb, head);
- else
- skb = NULL;
- spin_unlock_irq(&head->lock);
-
- if (!skb) {
- fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
- if (!fpl)
- return -ENOMEM;
-
- skb = alloc_skb(0, GFP_KERNEL);
- if (!skb) {
- kfree(fpl);
- return -ENOMEM;
- }
-
- fpl->user = get_uid(current_user());
- fpl->max = SCM_MAX_FD;
- fpl->count = 0;
-
- UNIXCB(skb).fp = fpl;
- skb->sk = sk;
- skb->scm_io_uring = 1;
- skb->destructor = unix_destruct_scm;
- refcount_add(skb->truesize, &sk->sk_wmem_alloc);
- }
-
- fpl = UNIXCB(skb).fp;
- fpl->fp[fpl->count++] = get_file(file);
- unix_inflight(fpl->user, file);
- skb_queue_head(head, skb);
- fput(file);
-#endif
+ io_free_file_tables(ctx, &ctx->file_table);
+ io_file_table_set_alloc_range(ctx, 0, 0);
return 0;
}
-static void io_rsrc_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
-{
- struct file *file = prsrc->file;
-#if defined(CONFIG_UNIX)
- struct sock *sock = ctx->ring_sock->sk;
- struct sk_buff_head list, *head = &sock->sk_receive_queue;
- struct sk_buff *skb;
- int i;
-
- if (!io_file_need_scm(file)) {
- fput(file);
- return;
- }
-
- __skb_queue_head_init(&list);
-
- /*
- * Find the skb that holds this file in its SCM_RIGHTS. When found,
- * remove this entry and rearrange the file array.
- */
- skb = skb_dequeue(head);
- while (skb) {
- struct scm_fp_list *fp;
-
- fp = UNIXCB(skb).fp;
- for (i = 0; i < fp->count; i++) {
- int left;
-
- if (fp->fp[i] != file)
- continue;
-
- unix_notinflight(fp->user, fp->fp[i]);
- left = fp->count - 1 - i;
- if (left) {
- memmove(&fp->fp[i], &fp->fp[i + 1],
- left * sizeof(struct file *));
- }
- fp->count--;
- if (!fp->count) {
- kfree_skb(skb);
- skb = NULL;
- } else {
- __skb_queue_tail(&list, skb);
- }
- fput(file);
- file = NULL;
- break;
- }
-
- if (!file)
- break;
-
- __skb_queue_tail(&list, skb);
-
- skb = skb_dequeue(head);
- }
-
- if (skb_peek(&list)) {
- spin_lock_irq(&head->lock);
- while ((skb = __skb_dequeue(&list)) != NULL)
- __skb_queue_tail(head, skb);
- spin_unlock_irq(&head->lock);
- }
-#else
- fput(file);
-#endif
-}
-
int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
unsigned nr_args, u64 __user *tags)
{
@@ -957,7 +549,7 @@ int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
int fd, ret;
unsigned i;
- if (ctx->file_data)
+ if (ctx->file_table.data.nr)
return -EBUSY;
if (!nr_args)
return -EINVAL;
@@ -965,31 +557,22 @@ int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
return -EMFILE;
if (nr_args > rlimit(RLIMIT_NOFILE))
return -EMFILE;
- ret = io_rsrc_node_switch_start(ctx);
- if (ret)
- return ret;
- ret = io_rsrc_data_alloc(ctx, io_rsrc_file_put, tags, nr_args,
- &ctx->file_data);
- if (ret)
- return ret;
-
- if (!io_alloc_file_tables(&ctx->file_table, nr_args)) {
- io_rsrc_data_free(ctx->file_data);
- ctx->file_data = NULL;
+ if (!io_alloc_file_tables(ctx, &ctx->file_table, nr_args))
return -ENOMEM;
- }
- for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
- struct io_fixed_file *file_slot;
+ for (i = 0; i < nr_args; i++) {
+ struct io_rsrc_node *node;
+ u64 tag = 0;
- if (fds && copy_from_user(&fd, &fds[i], sizeof(fd))) {
- ret = -EFAULT;
+ ret = -EFAULT;
+ if (tags && copy_from_user(&tag, &tags[i], sizeof(tag)))
+ goto fail;
+ if (fds && copy_from_user(&fd, &fds[i], sizeof(fd)))
goto fail;
- }
/* allow sparse sets */
if (!fds || fd == -1) {
ret = -EINVAL;
- if (unlikely(*io_get_tag_slot(ctx->file_data, i)))
+ if (tag)
goto fail;
continue;
}
@@ -1000,72 +583,40 @@ int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
goto fail;
/*
- * Don't allow io_uring instances to be registered. If UNIX
- * isn't enabled, then this causes a reference cycle and this
- * instance can never get freed. If UNIX is enabled we'll
- * handle it just fine, but there's still no point in allowing
- * a ring fd as it doesn't support regular read/write anyway.
+ * Don't allow io_uring instances to be registered.
*/
if (io_is_uring_fops(file)) {
fput(file);
goto fail;
}
- ret = io_scm_file_account(ctx, file);
- if (ret) {
+ ret = -ENOMEM;
+ node = io_rsrc_node_alloc(ctx, IORING_RSRC_FILE);
+ if (!node) {
fput(file);
goto fail;
}
- file_slot = io_fixed_file_slot(&ctx->file_table, i);
- io_fixed_file_set(file_slot, file);
+ if (tag)
+ node->tag = tag;
+ ctx->file_table.data.nodes[i] = node;
+ io_fixed_file_set(node, file);
io_file_bitmap_set(&ctx->file_table, i);
}
/* default it to the whole table */
- io_file_table_set_alloc_range(ctx, 0, ctx->nr_user_files);
- io_rsrc_node_switch(ctx, NULL);
+ io_file_table_set_alloc_range(ctx, 0, ctx->file_table.data.nr);
return 0;
fail:
- __io_sqe_files_unregister(ctx);
+ io_clear_table_tags(&ctx->file_table.data);
+ io_sqe_files_unregister(ctx);
return ret;
}
-static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
-{
- io_buffer_unmap(ctx, &prsrc->buf);
- prsrc->buf = NULL;
-}
-
-void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
-{
- unsigned int i;
-
- for (i = 0; i < ctx->nr_user_bufs; i++)
- io_buffer_unmap(ctx, &ctx->user_bufs[i]);
- kfree(ctx->user_bufs);
- io_rsrc_data_free(ctx->buf_data);
- ctx->user_bufs = NULL;
- ctx->buf_data = NULL;
- ctx->nr_user_bufs = 0;
-}
-
int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
{
- unsigned nr = ctx->nr_user_bufs;
- int ret;
-
- if (!ctx->buf_data)
+ if (!ctx->buf_table.nr)
return -ENXIO;
-
- /*
- * Quiesce may unlock ->uring_lock, and while it's not held
- * prevent new requests using the table.
- */
- ctx->nr_user_bufs = 0;
- ret = io_rsrc_ref_quiesce(ctx->buf_data, ctx);
- ctx->nr_user_bufs = nr;
- if (!ret)
- __io_sqe_buffers_unregister(ctx);
- return ret;
+ io_rsrc_data_free(ctx, &ctx->buf_table);
+ return 0;
}
/*
@@ -1091,9 +642,13 @@ static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
}
/* check previously registered pages */
- for (i = 0; i < ctx->nr_user_bufs; i++) {
- struct io_mapped_ubuf *imu = ctx->user_bufs[i];
+ for (i = 0; i < ctx->buf_table.nr; i++) {
+ struct io_rsrc_node *node = ctx->buf_table.nodes[i];
+ struct io_mapped_ubuf *imu;
+ if (!node)
+ continue;
+ imu = node->buf;
for (j = 0; j < imu->nr_bvecs; j++) {
if (!PageCompound(imu->bvec[j].bv_page))
continue;
@@ -1131,86 +686,116 @@ static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
if (!imu->acct_pages)
return 0;
- ret = io_account_mem(ctx, imu->acct_pages);
+ ret = io_account_mem(ctx->user, ctx->mm_account, imu->acct_pages);
if (ret)
imu->acct_pages = 0;
return ret;
}
-struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages)
+static bool io_coalesce_buffer(struct page ***pages, int *nr_pages,
+ struct io_imu_folio_data *data)
{
- unsigned long start, end, nr_pages;
- struct vm_area_struct **vmas = NULL;
- struct page **pages = NULL;
- int i, pret, ret = -ENOMEM;
+ struct page **page_array = *pages, **new_array = NULL;
+ unsigned nr_pages_left = *nr_pages;
+ unsigned nr_folios = data->nr_folios;
+ unsigned i, j;
+
+	/* Store head pages only */
+ new_array = kvmalloc_array(nr_folios, sizeof(struct page *), GFP_KERNEL);
+ if (!new_array)
+ return false;
+
+ for (i = 0, j = 0; i < nr_folios; i++) {
+ struct page *p = compound_head(page_array[j]);
+ struct folio *folio = page_folio(p);
+ unsigned int nr;
+
+ WARN_ON_ONCE(i > 0 && p != page_array[j]);
+
+ nr = i ? data->nr_pages_mid : data->nr_pages_head;
+ nr = min(nr, nr_pages_left);
+ /* Drop all but one ref, the entire folio will remain pinned. */
+ if (nr > 1)
+ unpin_user_folio(folio, nr - 1);
+ j += nr;
+ nr_pages_left -= nr;
+ new_array[i] = p;
+ }
- end = (ubuf + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
- start = ubuf >> PAGE_SHIFT;
- nr_pages = end - start;
+ WARN_ON_ONCE(j != *nr_pages);
- pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
- if (!pages)
- goto done;
+ kvfree(page_array);
+ *pages = new_array;
+ *nr_pages = nr_folios;
+ return true;
+}
- vmas = kvmalloc_array(nr_pages, sizeof(struct vm_area_struct *),
- GFP_KERNEL);
- if (!vmas)
- goto done;
+bool io_check_coalesce_buffer(struct page **page_array, int nr_pages,
+ struct io_imu_folio_data *data)
+{
+ struct folio *folio = page_folio(page_array[0]);
+ unsigned int count = 1, nr_folios = 1;
+ int i;
- ret = 0;
- mmap_read_lock(current->mm);
- pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
- pages, vmas);
- if (pret == nr_pages) {
- /* don't support file backed memory */
- for (i = 0; i < nr_pages; i++) {
- struct vm_area_struct *vma = vmas[i];
-
- if (vma_is_shmem(vma))
- continue;
- if (vma->vm_file &&
- !is_file_hugepages(vma->vm_file)) {
- ret = -EOPNOTSUPP;
- break;
- }
+ data->nr_pages_mid = folio_nr_pages(folio);
+ data->folio_shift = folio_shift(folio);
+ data->first_folio_page_idx = folio_page_idx(folio, page_array[0]);
+
+ /*
+ * Check if pages are contiguous inside a folio, and all folios have
+ * the same page count except for the head and tail.
+ */
+ for (i = 1; i < nr_pages; i++) {
+ if (page_folio(page_array[i]) == folio &&
+ page_array[i] == page_array[i-1] + 1) {
+ count++;
+ continue;
}
- *npages = nr_pages;
- } else {
- ret = pret < 0 ? pret : -EFAULT;
- }
- mmap_read_unlock(current->mm);
- if (ret) {
- /*
- * if we did partial map, or found file backed vmas,
- * release any pages we did get
- */
- if (pret > 0)
- unpin_user_pages(pages, pret);
- goto done;
- }
- ret = 0;
-done:
- kvfree(vmas);
- if (ret < 0) {
- kvfree(pages);
- pages = ERR_PTR(ret);
+
+ if (nr_folios == 1) {
+ if (folio_page_idx(folio, page_array[i-1]) !=
+ data->nr_pages_mid - 1)
+ return false;
+
+ data->nr_pages_head = count;
+ } else if (count != data->nr_pages_mid) {
+ return false;
+ }
+
+ folio = page_folio(page_array[i]);
+ if (folio_size(folio) != (1UL << data->folio_shift) ||
+ folio_page_idx(folio, page_array[i]) != 0)
+ return false;
+
+ count = 1;
+ nr_folios++;
}
- return pages;
+ if (nr_folios == 1)
+ data->nr_pages_head = count;
+
+ data->nr_folios = nr_folios;
+ return true;
}
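Usage note (not part of the patch): the coalescing check above fires when a registered buffer is backed by large folios, so one way to exercise it from userspace is to register a buffer carved out of a huge page, which then collapses to a single bvec per folio instead of one per 4K page. A hedged sketch, assuming hugetlb pages are available on the system:

#include <sys/mman.h>
#include <sys/uio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

static int register_huge_buffer(int ring_fd)
{
	size_t len = 2 * 1024 * 1024;	/* one 2MB huge page */
	struct iovec iov;
	void *buf;

	buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (buf == MAP_FAILED)
		return -1;

	iov.iov_base = buf;
	iov.iov_len = len;
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_BUFFERS, &iov, 1);
}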
-static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
- struct io_mapped_ubuf **pimu,
- struct page **last_hpage)
+static struct io_rsrc_node *io_sqe_buffer_register(struct io_ring_ctx *ctx,
+ struct iovec *iov,
+ struct page **last_hpage)
{
struct io_mapped_ubuf *imu = NULL;
struct page **pages = NULL;
+ struct io_rsrc_node *node;
unsigned long off;
size_t size;
int ret, nr_pages, i;
+ struct io_imu_folio_data data;
+ bool coalesced = false;
- *pimu = ctx->dummy_ubuf;
if (!iov->iov_base)
- return 0;
+ return NULL;
+
+ node = io_rsrc_node_alloc(ctx, IORING_RSRC_BUFFER);
+ if (!node)
+ return ERR_PTR(-ENOMEM);
ret = -ENOMEM;
pages = io_pin_pages((unsigned long) iov->iov_base, iov->iov_len,
@@ -1221,162 +806,772 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
goto done;
}
- imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
+ /* If it's huge page(s), try to coalesce them into fewer bvec entries */
+ if (nr_pages > 1 && io_check_coalesce_buffer(pages, nr_pages, &data)) {
+ if (data.nr_pages_mid != 1)
+ coalesced = io_coalesce_buffer(&pages, &nr_pages, &data);
+ }
+
+ imu = io_alloc_imu(ctx, nr_pages);
if (!imu)
goto done;
+ imu->nr_bvecs = nr_pages;
ret = io_buffer_account_pin(ctx, pages, nr_pages, imu, last_hpage);
- if (ret) {
- unpin_user_pages(pages, nr_pages);
+ if (ret)
goto done;
- }
- off = (unsigned long) iov->iov_base & ~PAGE_MASK;
size = iov->iov_len;
+ /* store original address for later verification */
+ imu->ubuf = (unsigned long) iov->iov_base;
+ imu->len = iov->iov_len;
+ imu->folio_shift = PAGE_SHIFT;
+ imu->release = io_release_ubuf;
+ imu->priv = imu;
+ imu->is_kbuf = false;
+ imu->dir = IO_IMU_DEST | IO_IMU_SOURCE;
+ if (coalesced)
+ imu->folio_shift = data.folio_shift;
+ refcount_set(&imu->refs, 1);
+
+ off = (unsigned long)iov->iov_base & ~PAGE_MASK;
+ if (coalesced)
+ off += data.first_folio_page_idx << PAGE_SHIFT;
+
+ node->buf = imu;
+ ret = 0;
+
for (i = 0; i < nr_pages; i++) {
size_t vec_len;
- vec_len = min_t(size_t, size, PAGE_SIZE - off);
- imu->bvec[i].bv_page = pages[i];
- imu->bvec[i].bv_len = vec_len;
- imu->bvec[i].bv_offset = off;
+ vec_len = min_t(size_t, size, (1UL << imu->folio_shift) - off);
+ bvec_set_page(&imu->bvec[i], pages[i], vec_len, off);
off = 0;
size -= vec_len;
}
- /* store original address for later verification */
- imu->ubuf = (unsigned long) iov->iov_base;
- imu->ubuf_end = imu->ubuf + iov->iov_len;
- imu->nr_bvecs = nr_pages;
- *pimu = imu;
- ret = 0;
done:
- if (ret)
- kvfree(imu);
+ if (ret) {
+ if (imu)
+ io_free_imu(ctx, imu);
+ if (pages) {
+ for (i = 0; i < nr_pages; i++)
+ unpin_user_folio(page_folio(pages[i]), 1);
+ }
+ io_cache_free(&ctx->node_cache, node);
+ node = ERR_PTR(ret);
+ }
kvfree(pages);
- return ret;
-}
-
-static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
-{
- ctx->user_bufs = kcalloc(nr_args, sizeof(*ctx->user_bufs), GFP_KERNEL);
- return ctx->user_bufs ? 0 : -ENOMEM;
+ return node;
}
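/*
 * Userspace counterpart, sketched with liburing's io_uring_register_buffers()
 * wrapper (assumed available; not part of this patch). Each iovec below
 * becomes one io_mapped_ubuf/io_rsrc_node pair on the kernel side via
 * io_sqe_buffer_register() above.
 */
#include <sys/uio.h>
#include <liburing.h>

static int example_register_buffers(struct io_uring *ring,
				    void *buf0, size_t len0,
				    void *buf1, size_t len1)
{
	struct iovec vecs[2] = {
		{ .iov_base = buf0, .iov_len = len0 },
		{ .iov_base = buf1, .iov_len = len1 },
	};

	/* returns 0 on success, -errno on failure */
	return io_uring_register_buffers(ring, vecs, 2);
}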
int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
unsigned int nr_args, u64 __user *tags)
{
struct page *last_hpage = NULL;
- struct io_rsrc_data *data;
+ struct io_rsrc_data data;
+ struct iovec fast_iov, *iov = &fast_iov;
+ const struct iovec __user *uvec;
int i, ret;
- struct iovec iov;
BUILD_BUG_ON(IORING_MAX_REG_BUFFERS >= (1u << 16));
- if (ctx->user_bufs)
+ if (ctx->buf_table.nr)
return -EBUSY;
if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS)
return -EINVAL;
- ret = io_rsrc_node_switch_start(ctx);
- if (ret)
- return ret;
- ret = io_rsrc_data_alloc(ctx, io_rsrc_buf_put, tags, nr_args, &data);
+ ret = io_rsrc_data_alloc(&data, nr_args);
if (ret)
return ret;
- ret = io_buffers_map_alloc(ctx, nr_args);
- if (ret) {
- io_rsrc_data_free(data);
- return ret;
- }
- for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) {
+ if (!arg)
+ memset(iov, 0, sizeof(*iov));
+
+ for (i = 0; i < nr_args; i++) {
+ struct io_rsrc_node *node;
+ u64 tag = 0;
+
if (arg) {
- ret = io_copy_iov(ctx, &iov, arg, i);
- if (ret)
+ uvec = (struct iovec __user *) arg;
+ iov = iovec_from_user(uvec, 1, 1, &fast_iov, ctx->compat);
+ if (IS_ERR(iov)) {
+ ret = PTR_ERR(iov);
break;
- ret = io_buffer_validate(&iov);
+ }
+ ret = io_buffer_validate(iov);
if (ret)
break;
- } else {
- memset(&iov, 0, sizeof(iov));
+ if (ctx->compat)
+ arg += sizeof(struct compat_iovec);
+ else
+ arg += sizeof(struct iovec);
}
- if (!iov.iov_base && *io_get_tag_slot(data, i)) {
- ret = -EINVAL;
- break;
+ if (tags) {
+ if (copy_from_user(&tag, &tags[i], sizeof(tag))) {
+ ret = -EFAULT;
+ break;
+ }
}
- ret = io_sqe_buffer_register(ctx, &iov, &ctx->user_bufs[i],
- &last_hpage);
- if (ret)
+ node = io_sqe_buffer_register(ctx, iov, &last_hpage);
+ if (IS_ERR(node)) {
+ ret = PTR_ERR(node);
break;
+ }
+ if (tag) {
+ if (!node) {
+ ret = -EINVAL;
+ break;
+ }
+ node->tag = tag;
+ }
+ data.nodes[i] = node;
}
- WARN_ON_ONCE(ctx->buf_data);
+ ctx->buf_table = data;
+ if (ret) {
+ io_clear_table_tags(&ctx->buf_table);
+ io_sqe_buffers_unregister(ctx);
+ }
+ return ret;
+}
- ctx->buf_data = data;
- if (ret)
- __io_sqe_buffers_unregister(ctx);
- else
- io_rsrc_node_switch(ctx, NULL);
+int io_buffer_register_bvec(struct io_uring_cmd *cmd, struct request *rq,
+ void (*release)(void *), unsigned int index,
+ unsigned int issue_flags)
+{
+ struct io_ring_ctx *ctx = cmd_to_io_kiocb(cmd)->ctx;
+ struct io_rsrc_data *data = &ctx->buf_table;
+ struct req_iterator rq_iter;
+ struct io_mapped_ubuf *imu;
+ struct io_rsrc_node *node;
+ struct bio_vec bv;
+ unsigned int nr_bvecs = 0;
+ int ret = 0;
+
+ io_ring_submit_lock(ctx, issue_flags);
+ if (index >= data->nr) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+ index = array_index_nospec(index, data->nr);
+
+ if (data->nodes[index]) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ node = io_rsrc_node_alloc(ctx, IORING_RSRC_BUFFER);
+ if (!node) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ /*
+ * blk_rq_nr_phys_segments() may overestimate the number of bvecs
+ * but avoids needing to iterate over the bvecs
+ */
+ imu = io_alloc_imu(ctx, blk_rq_nr_phys_segments(rq));
+ if (!imu) {
+ kfree(node);
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ imu->ubuf = 0;
+ imu->len = blk_rq_bytes(rq);
+ imu->acct_pages = 0;
+ imu->folio_shift = PAGE_SHIFT;
+ refcount_set(&imu->refs, 1);
+ imu->release = release;
+ imu->priv = rq;
+ imu->is_kbuf = true;
+ imu->dir = 1 << rq_data_dir(rq);
+
+ rq_for_each_bvec(bv, rq, rq_iter)
+ imu->bvec[nr_bvecs++] = bv;
+ imu->nr_bvecs = nr_bvecs;
+
+ node->buf = imu;
+ data->nodes[index] = node;
+unlock:
+ io_ring_submit_unlock(ctx, issue_flags);
return ret;
}
+EXPORT_SYMBOL_GPL(io_buffer_register_bvec);
-int io_import_fixed(int ddir, struct iov_iter *iter,
- struct io_mapped_ubuf *imu,
- u64 buf_addr, size_t len)
+int io_buffer_unregister_bvec(struct io_uring_cmd *cmd, unsigned int index,
+ unsigned int issue_flags)
+{
+ struct io_ring_ctx *ctx = cmd_to_io_kiocb(cmd)->ctx;
+ struct io_rsrc_data *data = &ctx->buf_table;
+ struct io_rsrc_node *node;
+ int ret = 0;
+
+ io_ring_submit_lock(ctx, issue_flags);
+ if (index >= data->nr) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+ index = array_index_nospec(index, data->nr);
+
+ node = data->nodes[index];
+ if (!node) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+ if (!node->buf->is_kbuf) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ io_put_rsrc_node(ctx, node);
+ data->nodes[index] = NULL;
+unlock:
+ io_ring_submit_unlock(ctx, issue_flags);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(io_buffer_unregister_bvec);
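/*
 * Hedged sketch of how a block driver's uring_cmd handler might use the two
 * exports above. my_rq_release() and the slot choice are hypothetical; the
 * io_buffer_{register,unregister}_bvec() calls follow the signatures in this
 * patch, and <linux/io_uring/cmd.h> is assumed to carry the declarations.
 */
#include <linux/blk-mq.h>
#include <linux/io_uring/cmd.h>

static void my_rq_release(void *priv)
{
	/* e.g. end or unref the struct request passed as priv */
}

static int my_cmd_attach_rq(struct io_uring_cmd *cmd, struct request *rq,
			    unsigned int issue_flags)
{
	/* expose the request's data pages as fixed buffer slot 0 */
	return io_buffer_register_bvec(cmd, rq, my_rq_release, 0, issue_flags);
}

static void my_cmd_detach_rq(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
	io_buffer_unregister_bvec(cmd, 0, issue_flags);
}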
+
+static int validate_fixed_range(u64 buf_addr, size_t len,
+ const struct io_mapped_ubuf *imu)
{
u64 buf_end;
- size_t offset;
- if (WARN_ON_ONCE(!imu))
- return -EFAULT;
if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
return -EFAULT;
/* not inside the mapped region */
- if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
+ if (unlikely(buf_addr < imu->ubuf || buf_end > (imu->ubuf + imu->len)))
return -EFAULT;
+ if (unlikely(len > MAX_RW_COUNT))
+ return -EFAULT;
+ return 0;
+}
+
+static int io_import_kbuf(int ddir, struct iov_iter *iter,
+ struct io_mapped_ubuf *imu, size_t len, size_t offset)
+{
+ size_t count = len + offset;
+
+ iov_iter_bvec(iter, ddir, imu->bvec, imu->nr_bvecs, count);
+ iov_iter_advance(iter, offset);
+
+ if (count < imu->len) {
+ const struct bio_vec *bvec = iter->bvec;
+
+ while (len > bvec->bv_len) {
+ len -= bvec->bv_len;
+ bvec++;
+ }
+ iter->nr_segs = 1 + bvec - iter->bvec;
+ }
+ return 0;
+}
+
+static int io_import_fixed(int ddir, struct iov_iter *iter,
+ struct io_mapped_ubuf *imu,
+ u64 buf_addr, size_t len)
+{
+ const struct bio_vec *bvec;
+ size_t folio_mask;
+ unsigned nr_segs;
+ size_t offset;
+ int ret;
+
+ ret = validate_fixed_range(buf_addr, len, imu);
+ if (unlikely(ret))
+ return ret;
+ if (!(imu->dir & (1 << ddir)))
+ return -EFAULT;
+
+ offset = buf_addr - imu->ubuf;
+
+ if (imu->is_kbuf)
+ return io_import_kbuf(ddir, iter, imu, len, offset);
/*
- * May not be a start of buffer, set size appropriately
- * and advance us to the beginning.
+ * Don't use iov_iter_advance() here, as it's really slow for
+ * using the latter parts of a big fixed buffer - it iterates
+ * over each segment manually. We can cheat a bit here for user
+ * registered nodes, because we know that:
+ *
+ * 1) it's a BVEC iter, we set it up
+ * 2) all bvecs are the same in size, except potentially the
+ * first and last bvec
*/
- offset = buf_addr - imu->ubuf;
- iov_iter_bvec(iter, ddir, imu->bvec, imu->nr_bvecs, offset + len);
+ folio_mask = (1UL << imu->folio_shift) - 1;
+ bvec = imu->bvec;
+ if (offset >= bvec->bv_len) {
+ unsigned long seg_skip;
+
+ /* skip first vec */
+ offset -= bvec->bv_len;
+ seg_skip = 1 + (offset >> imu->folio_shift);
+ bvec += seg_skip;
+ offset &= folio_mask;
+ }
+ nr_segs = (offset + len + bvec->bv_offset + folio_mask) >> imu->folio_shift;
+ iov_iter_bvec(iter, ddir, bvec, nr_segs, len);
+ iter->iov_offset = offset;
+ return 0;
+}
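/*
 * Worked example for the fast-path math above (editorial, not from the
 * patch): take a page-aligned registered buffer with 4K folios
 * (folio_shift == 12) and an import at buffer offset 10000 for 3000 bytes.
 * The first bvec (4096 bytes) is skipped, seg_skip = 1 + (5904 >> 12) = 2,
 * the residual offset is 5904 & 4095 = 1808, and
 * nr_segs = (1808 + 3000 + 0 + 4095) >> 12 = 2, i.e. the iterator covers
 * exactly the third and fourth pages of the buffer.
 */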
+
+inline struct io_rsrc_node *io_find_buf_node(struct io_kiocb *req,
+ unsigned issue_flags)
+{
+ struct io_ring_ctx *ctx = req->ctx;
+ struct io_rsrc_node *node;
+
+ if (req->flags & REQ_F_BUF_NODE)
+ return req->buf_node;
+ req->flags |= REQ_F_BUF_NODE;
+
+ io_ring_submit_lock(ctx, issue_flags);
+ node = io_rsrc_node_lookup(&ctx->buf_table, req->buf_index);
+ if (node) {
+ node->refs++;
+ req->buf_node = node;
+ io_ring_submit_unlock(ctx, issue_flags);
+ return node;
+ }
+ req->flags &= ~REQ_F_BUF_NODE;
+ io_ring_submit_unlock(ctx, issue_flags);
+ return NULL;
+}
+
+int io_import_reg_buf(struct io_kiocb *req, struct iov_iter *iter,
+ u64 buf_addr, size_t len, int ddir,
+ unsigned issue_flags)
+{
+ struct io_rsrc_node *node;
+
+ node = io_find_buf_node(req, issue_flags);
+ if (!node)
+ return -EFAULT;
+ return io_import_fixed(ddir, iter, node->buf, buf_addr, len);
+}
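/*
 * Userspace usage sketch (liburing helper assumed, not part of the patch):
 * a READ_FIXED against registered buffer slot 3 ends up in
 * io_import_reg_buf() above, which resolves the node and builds the bvec
 * iterator without pinning pages at issue time.
 */
static void example_queue_read_fixed(struct io_uring *ring, int fd,
				     void *buf, unsigned int len)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	/* buf must lie inside the iovec registered at buf_index 3 */
	io_uring_prep_read_fixed(sqe, fd, buf, len, 0, 3);
}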
+
+/* Lock two rings at once. The rings must be different! */
+static void lock_two_rings(struct io_ring_ctx *ctx1, struct io_ring_ctx *ctx2)
+{
+ if (ctx1 > ctx2)
+ swap(ctx1, ctx2);
+ mutex_lock(&ctx1->uring_lock);
+ mutex_lock_nested(&ctx2->uring_lock, SINGLE_DEPTH_NESTING);
+}
+
+/* Both rings are locked by the caller. */
+static int io_clone_buffers(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx,
+ struct io_uring_clone_buffers *arg)
+{
+ struct io_rsrc_data data;
+ int i, ret, off, nr;
+ unsigned int nbufs;
+
+ lockdep_assert_held(&ctx->uring_lock);
+ lockdep_assert_held(&src_ctx->uring_lock);
+
+ /*
+ * Accounting state is shared between the two rings; that only works if
+ * both rings are accounted towards the same counters.
+ */
+ if (ctx->user != src_ctx->user || ctx->mm_account != src_ctx->mm_account)
+ return -EINVAL;
+
+ /* if offsets are given, must have nr specified too */
+ if (!arg->nr && (arg->dst_off || arg->src_off))
+ return -EINVAL;
+ /* not allowed unless REPLACE is set */
+ if (ctx->buf_table.nr && !(arg->flags & IORING_REGISTER_DST_REPLACE))
+ return -EBUSY;
+
+ nbufs = src_ctx->buf_table.nr;
+ if (!nbufs)
+ return -ENXIO;
+ if (!arg->nr)
+ arg->nr = nbufs;
+ else if (arg->nr > nbufs)
+ return -EINVAL;
+ else if (arg->nr > IORING_MAX_REG_BUFFERS)
+ return -EINVAL;
+ if (check_add_overflow(arg->nr, arg->src_off, &off) || off > nbufs)
+ return -EOVERFLOW;
+ if (check_add_overflow(arg->nr, arg->dst_off, &nbufs))
+ return -EOVERFLOW;
+ if (nbufs > IORING_MAX_REG_BUFFERS)
+ return -EINVAL;
+
+ ret = io_rsrc_data_alloc(&data, max(nbufs, ctx->buf_table.nr));
+ if (ret)
+ return ret;
+
+ /* Copy original dst nodes from before the cloned range */
+ for (i = 0; i < min(arg->dst_off, ctx->buf_table.nr); i++) {
+ struct io_rsrc_node *node = ctx->buf_table.nodes[i];
- if (offset) {
+ if (node) {
+ data.nodes[i] = node;
+ node->refs++;
+ }
+ }
+
+ off = arg->dst_off;
+ i = arg->src_off;
+ nr = arg->nr;
+ while (nr--) {
+ struct io_rsrc_node *dst_node, *src_node;
+
+ src_node = io_rsrc_node_lookup(&src_ctx->buf_table, i);
+ if (!src_node) {
+ dst_node = NULL;
+ } else {
+ dst_node = io_rsrc_node_alloc(ctx, IORING_RSRC_BUFFER);
+ if (!dst_node) {
+ io_rsrc_data_free(ctx, &data);
+ return -ENOMEM;
+ }
+
+ refcount_inc(&src_node->buf->refs);
+ dst_node->buf = src_node->buf;
+ }
+ data.nodes[off++] = dst_node;
+ i++;
+ }
+
+ /* Copy original dst nodes from after the cloned range */
+ for (i = nbufs; i < ctx->buf_table.nr; i++) {
+ struct io_rsrc_node *node = ctx->buf_table.nodes[i];
+
+ if (node) {
+ data.nodes[i] = node;
+ node->refs++;
+ }
+ }
+
+ /*
+ * If asked for replace, put the old table. data->nodes[] holds both
+ * old and new nodes at this point.
+ */
+ if (arg->flags & IORING_REGISTER_DST_REPLACE)
+ io_rsrc_data_free(ctx, &ctx->buf_table);
+
+ /*
+ * ctx->buf_table must be empty now - either the contents are being
+ * replaced and we just freed the table, or the contents are being
+ * copied to a ring that does not have buffers yet (checked at function
+ * entry).
+ */
+ WARN_ON_ONCE(ctx->buf_table.nr);
+ ctx->buf_table = data;
+ return 0;
+}
+
+/*
+ * Copy the registered buffers from the source ring whose file descriptor
+ * is given in the src_fd to the current ring. This is identical to registering
+ * the buffers with ctx, except faster as mappings already exist.
+ *
+ * Since the memory is already accounted once, don't account it again.
+ */
+int io_register_clone_buffers(struct io_ring_ctx *ctx, void __user *arg)
+{
+ struct io_uring_clone_buffers buf;
+ struct io_ring_ctx *src_ctx;
+ bool registered_src;
+ struct file *file;
+ int ret;
+
+ if (copy_from_user(&buf, arg, sizeof(buf)))
+ return -EFAULT;
+ if (buf.flags & ~(IORING_REGISTER_SRC_REGISTERED|IORING_REGISTER_DST_REPLACE))
+ return -EINVAL;
+ if (!(buf.flags & IORING_REGISTER_DST_REPLACE) && ctx->buf_table.nr)
+ return -EBUSY;
+ if (memchr_inv(buf.pad, 0, sizeof(buf.pad)))
+ return -EINVAL;
+
+ registered_src = (buf.flags & IORING_REGISTER_SRC_REGISTERED) != 0;
+ file = io_uring_register_get_file(buf.src_fd, registered_src);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ src_ctx = file->private_data;
+ if (src_ctx != ctx) {
+ mutex_unlock(&ctx->uring_lock);
+ lock_two_rings(ctx, src_ctx);
+
+ if (src_ctx->submitter_task &&
+ src_ctx->submitter_task != current) {
+ ret = -EEXIST;
+ goto out;
+ }
+ }
+
+ ret = io_clone_buffers(ctx, src_ctx, &buf);
+
+out:
+ if (src_ctx != ctx)
+ mutex_unlock(&src_ctx->uring_lock);
+
+ fput(file);
+ return ret;
+}
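/*
 * Userspace sketch (not part of the patch): cloning every registered buffer
 * from a source ring into this ring, replacing its current table. The raw
 * io_uring_register(2) syscall is used; the struct layout is assumed to
 * follow the uapi struct io_uring_clone_buffers referenced above, and
 * nr == 0 means "clone all source buffers" per io_clone_buffers().
 */
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

static int example_clone_all_buffers(int dst_ring_fd, int src_ring_fd)
{
	struct io_uring_clone_buffers arg = {
		.src_fd	= src_ring_fd,
		.flags	= IORING_REGISTER_DST_REPLACE,
		.nr	= 0,
	};

	return syscall(__NR_io_uring_register, dst_ring_fd,
		       IORING_REGISTER_CLONE_BUFFERS, &arg, 1);
}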
+
+void io_vec_free(struct iou_vec *iv)
+{
+ if (!iv->iovec)
+ return;
+ kfree(iv->iovec);
+ iv->iovec = NULL;
+ iv->nr = 0;
+}
+
+int io_vec_realloc(struct iou_vec *iv, unsigned nr_entries)
+{
+ gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
+ struct iovec *iov;
+
+ iov = kmalloc_array(nr_entries, sizeof(iov[0]), gfp);
+ if (!iov)
+ return -ENOMEM;
+
+ io_vec_free(iv);
+ iv->iovec = iov;
+ iv->nr = nr_entries;
+ return 0;
+}
+
+static int io_vec_fill_bvec(int ddir, struct iov_iter *iter,
+ struct io_mapped_ubuf *imu,
+ struct iovec *iovec, unsigned nr_iovs,
+ struct iou_vec *vec)
+{
+ unsigned long folio_size = 1 << imu->folio_shift;
+ unsigned long folio_mask = folio_size - 1;
+ struct bio_vec *res_bvec = vec->bvec;
+ size_t total_len = 0;
+ unsigned bvec_idx = 0;
+ unsigned iov_idx;
+
+ for (iov_idx = 0; iov_idx < nr_iovs; iov_idx++) {
+ size_t iov_len = iovec[iov_idx].iov_len;
+ u64 buf_addr = (u64)(uintptr_t)iovec[iov_idx].iov_base;
+ struct bio_vec *src_bvec;
+ size_t offset;
+ int ret;
+
+ ret = validate_fixed_range(buf_addr, iov_len, imu);
+ if (unlikely(ret))
+ return ret;
+
+ if (unlikely(!iov_len))
+ return -EFAULT;
+ if (unlikely(check_add_overflow(total_len, iov_len, &total_len)))
+ return -EOVERFLOW;
+
+ offset = buf_addr - imu->ubuf;
/*
- * Don't use iov_iter_advance() here, as it's really slow for
- * using the latter parts of a big fixed buffer - it iterates
- * over each segment manually. We can cheat a bit here, because
- * we know that:
- *
- * 1) it's a BVEC iter, we set it up
- * 2) all bvecs are PAGE_SIZE in size, except potentially the
- * first and last bvec
- *
- * So just find our index, and adjust the iterator afterwards.
- * If the offset is within the first bvec (or the whole first
- * bvec, just use iov_iter_advance(). This makes it easier
- * since we can just skip the first segment, which may not
- * be PAGE_SIZE aligned.
+ * Only the first bvec can have a non-zero bv_offset; account for it
+ * here and work with full folios below.
*/
- const struct bio_vec *bvec = imu->bvec;
+ offset += imu->bvec[0].bv_offset;
- if (offset <= bvec->bv_len) {
- iov_iter_advance(iter, offset);
- } else {
- unsigned long seg_skip;
+ src_bvec = imu->bvec + (offset >> imu->folio_shift);
+ offset &= folio_mask;
- /* skip first vec */
- offset -= bvec->bv_len;
- seg_skip = 1 + (offset >> PAGE_SHIFT);
+ for (; iov_len; offset = 0, bvec_idx++, src_bvec++) {
+ size_t seg_size = min_t(size_t, iov_len,
+ folio_size - offset);
- iter->bvec = bvec + seg_skip;
- iter->nr_segs -= seg_skip;
- iter->count -= bvec->bv_len + offset;
- iter->iov_offset = offset & ~PAGE_MASK;
+ bvec_set_page(&res_bvec[bvec_idx],
+ src_bvec->bv_page, seg_size, offset);
+ iov_len -= seg_size;
}
}
+ if (total_len > MAX_RW_COUNT)
+ return -EINVAL;
+
+ iov_iter_bvec(iter, ddir, res_bvec, bvec_idx, total_len);
+ return 0;
+}
+
+static int io_estimate_bvec_size(struct iovec *iov, unsigned nr_iovs,
+ struct io_mapped_ubuf *imu)
+{
+ unsigned shift = imu->folio_shift;
+ size_t max_segs = 0;
+ unsigned i;
+
+ for (i = 0; i < nr_iovs; i++) {
+ max_segs += (iov[i].iov_len >> shift) + 2;
+ if (max_segs > INT_MAX)
+ return -EOVERFLOW;
+ }
+ return max_segs;
+}
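/*
 * Worked example for the "+ 2" bound above (editorial illustration, not part
 * of the patch): a 100-byte iovec that straddles a 4K folio boundary needs
 * two bvec entries even though 100 >> 12 == 0. In general an iovec adds at
 * most one partially covered folio at each end on top of the
 * (iov_len >> folio_shift) fully covered ones.
 */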
+
+static int io_vec_fill_kern_bvec(int ddir, struct iov_iter *iter,
+ struct io_mapped_ubuf *imu,
+ struct iovec *iovec, unsigned nr_iovs,
+ struct iou_vec *vec)
+{
+ const struct bio_vec *src_bvec = imu->bvec;
+ struct bio_vec *res_bvec = vec->bvec;
+ unsigned res_idx = 0;
+ size_t total_len = 0;
+ unsigned iov_idx;
+
+ for (iov_idx = 0; iov_idx < nr_iovs; iov_idx++) {
+ size_t offset = (size_t)(uintptr_t)iovec[iov_idx].iov_base;
+ size_t iov_len = iovec[iov_idx].iov_len;
+ struct bvec_iter bi = {
+ .bi_size = offset + iov_len,
+ };
+ struct bio_vec bv;
+
+ bvec_iter_advance(src_bvec, &bi, offset);
+ for_each_mp_bvec(bv, src_bvec, bi, bi)
+ res_bvec[res_idx++] = bv;
+ total_len += iov_len;
+ }
+ iov_iter_bvec(iter, ddir, res_bvec, res_idx, total_len);
+ return 0;
+}
+
+static int iov_kern_bvec_size(const struct iovec *iov,
+ const struct io_mapped_ubuf *imu,
+ unsigned int *nr_seg)
+{
+ size_t offset = (size_t)(uintptr_t)iov->iov_base;
+ const struct bio_vec *bvec = imu->bvec;
+ int start = 0, i = 0;
+ size_t off = 0;
+ int ret;
+
+ ret = validate_fixed_range(offset, iov->iov_len, imu);
+ if (unlikely(ret))
+ return ret;
+
+ for (i = 0; off < offset + iov->iov_len && i < imu->nr_bvecs;
+ off += bvec[i].bv_len, i++) {
+ if (offset >= off && offset < off + bvec[i].bv_len)
+ start = i;
+ }
+ *nr_seg = i - start;
+ return 0;
+}
+
+static int io_kern_bvec_size(struct iovec *iov, unsigned nr_iovs,
+ struct io_mapped_ubuf *imu, unsigned *nr_segs)
+{
+ unsigned max_segs = 0;
+ size_t total_len = 0;
+ unsigned i;
+ int ret;
+
+ *nr_segs = 0;
+ for (i = 0; i < nr_iovs; i++) {
+ if (unlikely(!iov[i].iov_len))
+ return -EFAULT;
+ if (unlikely(check_add_overflow(total_len, iov[i].iov_len,
+ &total_len)))
+ return -EOVERFLOW;
+ ret = iov_kern_bvec_size(&iov[i], imu, &max_segs);
+ if (unlikely(ret))
+ return ret;
+ *nr_segs += max_segs;
+ }
+ if (total_len > MAX_RW_COUNT)
+ return -EINVAL;
+ return 0;
+}
+
+int io_import_reg_vec(int ddir, struct iov_iter *iter,
+ struct io_kiocb *req, struct iou_vec *vec,
+ unsigned nr_iovs, unsigned issue_flags)
+{
+ struct io_rsrc_node *node;
+ struct io_mapped_ubuf *imu;
+ unsigned iovec_off;
+ struct iovec *iov;
+ unsigned nr_segs;
+
+ node = io_find_buf_node(req, issue_flags);
+ if (!node)
+ return -EFAULT;
+ imu = node->buf;
+ if (!(imu->dir & (1 << ddir)))
+ return -EFAULT;
+
+ iovec_off = vec->nr - nr_iovs;
+ iov = vec->iovec + iovec_off;
+
+ if (imu->is_kbuf) {
+ int ret = io_kern_bvec_size(iov, nr_iovs, imu, &nr_segs);
+
+ if (unlikely(ret))
+ return ret;
+ } else {
+ int ret = io_estimate_bvec_size(iov, nr_iovs, imu);
+
+ if (ret < 0)
+ return ret;
+ nr_segs = ret;
+ }
+
+ if (sizeof(struct bio_vec) > sizeof(struct iovec)) {
+ size_t bvec_bytes;
+
+ bvec_bytes = nr_segs * sizeof(struct bio_vec);
+ nr_segs = (bvec_bytes + sizeof(*iov) - 1) / sizeof(*iov);
+ nr_segs += nr_iovs;
+ }
+
+ if (nr_segs > vec->nr) {
+ struct iou_vec tmp_vec = {};
+ int ret;
+
+ ret = io_vec_realloc(&tmp_vec, nr_segs);
+ if (ret)
+ return ret;
+
+ iovec_off = tmp_vec.nr - nr_iovs;
+ memcpy(tmp_vec.iovec + iovec_off, iov, sizeof(*iov) * nr_iovs);
+ io_vec_free(vec);
+
+ *vec = tmp_vec;
+ iov = vec->iovec + iovec_off;
+ req->flags |= REQ_F_NEED_CLEANUP;
+ }
+
+ if (imu->is_kbuf)
+ return io_vec_fill_kern_bvec(ddir, iter, imu, iov, nr_iovs, vec);
+
+ return io_vec_fill_bvec(ddir, iter, imu, iov, nr_iovs, vec);
+}
+
+int io_prep_reg_iovec(struct io_kiocb *req, struct iou_vec *iv,
+ const struct iovec __user *uvec, size_t uvec_segs)
+{
+ struct iovec *iov;
+ int iovec_off, ret;
+ void *res;
+
+ if (uvec_segs > iv->nr) {
+ ret = io_vec_realloc(iv, uvec_segs);
+ if (ret)
+ return ret;
+ req->flags |= REQ_F_NEED_CLEANUP;
+ }
+
+ /* pad iovec to the right */
+ iovec_off = iv->nr - uvec_segs;
+ iov = iv->iovec + iovec_off;
+ res = iovec_from_user(uvec, uvec_segs, uvec_segs, iov,
+ io_is_compat(req->ctx));
+ if (IS_ERR(res))
+ return PTR_ERR(res);
+ req->flags |= REQ_F_IMPORT_BUFFER;
return 0;
}
diff --git a/io_uring/rsrc.h b/io_uring/rsrc.h
index 2b8743645efc..d603f6a47f5e 100644
--- a/io_uring/rsrc.h
+++ b/io_uring/rsrc.h
@@ -2,172 +2,128 @@
#ifndef IOU_RSRC_H
#define IOU_RSRC_H
-#include <net/af_unix.h>
+#include <linux/io_uring_types.h>
+#include <linux/lockdep.h>
-#define IO_RSRC_TAG_TABLE_SHIFT (PAGE_SHIFT - 3)
-#define IO_RSRC_TAG_TABLE_MAX (1U << IO_RSRC_TAG_TABLE_SHIFT)
-#define IO_RSRC_TAG_TABLE_MASK (IO_RSRC_TAG_TABLE_MAX - 1)
+#define IO_VEC_CACHE_SOFT_CAP 256
enum {
IORING_RSRC_FILE = 0,
IORING_RSRC_BUFFER = 1,
};
-struct io_rsrc_put {
- struct list_head list;
+struct io_rsrc_node {
+ unsigned char type;
+ int refs;
+
u64 tag;
union {
- void *rsrc;
- struct file *file;
+ unsigned long file_ptr;
struct io_mapped_ubuf *buf;
};
};
-typedef void (rsrc_put_fn)(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);
-
-struct io_rsrc_data {
- struct io_ring_ctx *ctx;
-
- u64 **tags;
- unsigned int nr;
- rsrc_put_fn *do_put;
- atomic_t refs;
- struct completion done;
- bool quiesce;
-};
-
-struct io_rsrc_node {
- struct percpu_ref refs;
- struct list_head node;
- struct list_head rsrc_list;
- struct io_rsrc_data *rsrc_data;
- struct llist_node llist;
- bool done;
+enum {
+ IO_IMU_DEST = 1 << ITER_DEST,
+ IO_IMU_SOURCE = 1 << ITER_SOURCE,
};
struct io_mapped_ubuf {
u64 ubuf;
- u64 ubuf_end;
+ unsigned int len;
unsigned int nr_bvecs;
+ unsigned int folio_shift;
+ refcount_t refs;
unsigned long acct_pages;
- struct bio_vec bvec[];
+ void (*release)(void *);
+ void *priv;
+ bool is_kbuf;
+ u8 dir;
+ struct bio_vec bvec[] __counted_by(nr_bvecs);
+};
+
+struct io_imu_folio_data {
+ /* Head folio can be partially included in the fixed buf */
+ unsigned int nr_pages_head;
+ /* For non-head/tail folios, has to be fully included */
+ unsigned int nr_pages_mid;
+ unsigned int folio_shift;
+ unsigned int nr_folios;
+ unsigned long first_folio_page_idx;
};
-void io_rsrc_put_tw(struct callback_head *cb);
-void io_rsrc_put_work(struct work_struct *work);
-void io_rsrc_refs_refill(struct io_ring_ctx *ctx);
-void io_wait_rsrc_data(struct io_rsrc_data *data);
-void io_rsrc_node_destroy(struct io_rsrc_node *ref_node);
-void io_rsrc_refs_drop(struct io_ring_ctx *ctx);
-int io_rsrc_node_switch_start(struct io_ring_ctx *ctx);
-int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
- struct io_rsrc_node *node, void *rsrc);
-void io_rsrc_node_switch(struct io_ring_ctx *ctx,
- struct io_rsrc_data *data_to_kill);
-
-int io_import_fixed(int ddir, struct iov_iter *iter,
- struct io_mapped_ubuf *imu,
- u64 buf_addr, size_t len);
-
-void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx);
+bool io_rsrc_cache_init(struct io_ring_ctx *ctx);
+void io_rsrc_cache_free(struct io_ring_ctx *ctx);
+struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx, int type);
+void io_free_rsrc_node(struct io_ring_ctx *ctx, struct io_rsrc_node *node);
+void io_rsrc_data_free(struct io_ring_ctx *ctx, struct io_rsrc_data *data);
+int io_rsrc_data_alloc(struct io_rsrc_data *data, unsigned nr);
+
+struct io_rsrc_node *io_find_buf_node(struct io_kiocb *req,
+ unsigned issue_flags);
+int io_import_reg_buf(struct io_kiocb *req, struct iov_iter *iter,
+ u64 buf_addr, size_t len, int ddir,
+ unsigned issue_flags);
+int io_import_reg_vec(int ddir, struct iov_iter *iter,
+ struct io_kiocb *req, struct iou_vec *vec,
+ unsigned nr_iovs, unsigned issue_flags);
+int io_prep_reg_iovec(struct io_kiocb *req, struct iou_vec *iv,
+ const struct iovec __user *uvec, size_t uvec_segs);
+
+int io_register_clone_buffers(struct io_ring_ctx *ctx, void __user *arg);
int io_sqe_buffers_unregister(struct io_ring_ctx *ctx);
int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
unsigned int nr_args, u64 __user *tags);
-void __io_sqe_files_unregister(struct io_ring_ctx *ctx);
int io_sqe_files_unregister(struct io_ring_ctx *ctx);
int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
unsigned nr_args, u64 __user *tags);
-int __io_scm_file_account(struct io_ring_ctx *ctx, struct file *file);
-
-#if defined(CONFIG_UNIX)
-static inline bool io_file_need_scm(struct file *filp)
-{
- return !!unix_get_socket(filp);
-}
-#else
-static inline bool io_file_need_scm(struct file *filp)
-{
- return false;
-}
-#endif
-
-static inline int io_scm_file_account(struct io_ring_ctx *ctx,
- struct file *file)
-{
- if (likely(!io_file_need_scm(file)))
- return 0;
- return __io_scm_file_account(ctx, file);
-}
-
int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
unsigned nr_args);
int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
unsigned size, unsigned type);
int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
unsigned int size, unsigned int type);
+int io_validate_user_buf_range(u64 uaddr, u64 ulen);
-static inline void io_rsrc_put_node(struct io_rsrc_node *node, int nr)
-{
- percpu_ref_put_many(&node->refs, nr);
-}
-
-static inline void io_req_put_rsrc(struct io_kiocb *req)
-{
- if (req->rsrc_node)
- io_rsrc_put_node(req->rsrc_node, 1);
-}
-
-static inline void io_req_put_rsrc_locked(struct io_kiocb *req,
- struct io_ring_ctx *ctx)
- __must_hold(&ctx->uring_lock)
-{
- struct io_rsrc_node *node = req->rsrc_node;
-
- if (node) {
- if (node == ctx->rsrc_node)
- ctx->rsrc_cached_refs++;
- else
- io_rsrc_put_node(node, 1);
- }
-}
+bool io_check_coalesce_buffer(struct page **page_array, int nr_pages,
+ struct io_imu_folio_data *data);
-static inline void io_charge_rsrc_node(struct io_ring_ctx *ctx)
+static inline struct io_rsrc_node *io_rsrc_node_lookup(struct io_rsrc_data *data,
+ int index)
{
- ctx->rsrc_cached_refs--;
- if (unlikely(ctx->rsrc_cached_refs < 0))
- io_rsrc_refs_refill(ctx);
+ if (index < data->nr)
+ return data->nodes[array_index_nospec(index, data->nr)];
+ return NULL;
}
-static inline void io_req_set_rsrc_node(struct io_kiocb *req,
- struct io_ring_ctx *ctx,
- unsigned int issue_flags)
+static inline void io_put_rsrc_node(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
{
- if (!req->rsrc_node) {
- req->rsrc_node = ctx->rsrc_node;
-
- if (!(issue_flags & IO_URING_F_UNLOCKED)) {
- lockdep_assert_held(&ctx->uring_lock);
-
- io_charge_rsrc_node(ctx);
- } else {
- percpu_ref_get(&req->rsrc_node->refs);
- }
- }
+ lockdep_assert_held(&ctx->uring_lock);
+ if (!--node->refs)
+ io_free_rsrc_node(ctx, node);
}
-static inline u64 *io_get_tag_slot(struct io_rsrc_data *data, unsigned int idx)
+static inline bool io_reset_rsrc_node(struct io_ring_ctx *ctx,
+ struct io_rsrc_data *data, int index)
{
- unsigned int off = idx & IO_RSRC_TAG_TABLE_MASK;
- unsigned int table_idx = idx >> IO_RSRC_TAG_TABLE_SHIFT;
+ struct io_rsrc_node *node = data->nodes[index];
- return &data->tags[table_idx][off];
+ if (!node)
+ return false;
+ io_put_rsrc_node(ctx, node);
+ data->nodes[index] = NULL;
+ return true;
}
int io_files_update(struct io_kiocb *req, unsigned int issue_flags);
int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int __io_account_mem(struct user_struct *user, unsigned long nr_pages);
+int io_account_mem(struct user_struct *user, struct mm_struct *mm_account,
+ unsigned long nr_pages);
+void io_unaccount_mem(struct user_struct *user, struct mm_struct *mm_account,
+ unsigned long nr_pages);
static inline void __io_unaccount_mem(struct user_struct *user,
unsigned long nr_pages)
@@ -175,4 +131,21 @@ static inline void __io_unaccount_mem(struct user_struct *user,
atomic_long_sub(nr_pages, &user->locked_vm);
}
+void io_vec_free(struct iou_vec *iv);
+int io_vec_realloc(struct iou_vec *iv, unsigned nr_entries);
+
+static inline void io_vec_reset_iovec(struct iou_vec *iv,
+ struct iovec *iovec, unsigned nr)
+{
+ io_vec_free(iv);
+ iv->iovec = iovec;
+ iv->nr = nr;
+}
+
+static inline void io_alloc_cache_vec_kasan(struct iou_vec *iv)
+{
+ if (IS_ENABLED(CONFIG_KASAN))
+ io_vec_free(iv);
+}
+
#endif
diff --git a/io_uring/rw.c b/io_uring/rw.c
index 9c3ddd46a1ad..70ca88cc1f54 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -10,16 +10,23 @@
#include <linux/poll.h>
#include <linux/nospec.h>
#include <linux/compat.h>
-#include <linux/io_uring.h>
+#include <linux/io_uring/cmd.h>
+#include <linux/indirect_call_wrapper.h>
#include <uapi/linux/io_uring.h>
+#include "filetable.h"
#include "io_uring.h"
#include "opdef.h"
#include "kbuf.h"
+#include "alloc_cache.h"
#include "rsrc.h"
+#include "poll.h"
#include "rw.h"
+static void io_complete_rw(struct kiocb *kiocb, long res);
+static void io_complete_rw_iopoll(struct kiocb *kiocb, long res);
+
struct io_rw {
/* NOTE: kiocb has the file as the first member, so don't do it here */
struct kiocb kiocb;
@@ -28,29 +35,31 @@ struct io_rw {
rwf_t flags;
};
-static inline bool io_file_supports_nowait(struct io_kiocb *req)
+static bool io_file_supports_nowait(struct io_kiocb *req, __poll_t mask)
{
- return req->flags & REQ_F_SUPPORT_NOWAIT;
+ /* If FMODE_NOWAIT is set for a file, we're golden */
+ if (req->flags & REQ_F_SUPPORT_NOWAIT)
+ return true;
+ /* No FMODE_NOWAIT, if we can poll, check the status */
+ if (io_file_can_poll(req)) {
+ struct poll_table_struct pt = { ._key = mask };
+
+ return vfs_poll(req->file, &pt) & mask;
+ }
+ /* No FMODE_NOWAIT support, and file isn't pollable. Tough luck. */
+ return false;
}
-#ifdef CONFIG_COMPAT
static int io_iov_compat_buffer_select_prep(struct io_rw *rw)
{
- struct compat_iovec __user *uiov;
- compat_ssize_t clen;
+ struct compat_iovec __user *uiov = u64_to_user_ptr(rw->addr);
+ struct compat_iovec iov;
- uiov = u64_to_user_ptr(rw->addr);
- if (!access_ok(uiov, sizeof(*uiov)))
+ if (copy_from_user(&iov, uiov, sizeof(iov)))
return -EFAULT;
- if (__get_user(clen, &uiov->iov_len))
- return -EFAULT;
- if (clen < 0)
- return -EINVAL;
-
- rw->len = clen;
+ rw->len = iov.iov_len;
return 0;
}
-#endif
static int io_iov_buffer_select_prep(struct io_kiocb *req)
{
@@ -61,10 +70,8 @@ static int io_iov_buffer_select_prep(struct io_kiocb *req)
if (rw->len != 1)
return -EINVAL;
-#ifdef CONFIG_COMPAT
- if (req->ctx->compat)
+ if (io_is_compat(req->ctx))
return io_iov_compat_buffer_select_prep(rw);
-#endif
uiov = u64_to_user_ptr(rw->addr);
if (copy_from_user(&iov, uiov, sizeof(*uiov)))
@@ -73,27 +80,192 @@ static int io_iov_buffer_select_prep(struct io_kiocb *req)
return 0;
}
-int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+static int io_import_vec(int ddir, struct io_kiocb *req,
+ struct io_async_rw *io,
+ const struct iovec __user *uvec,
+ size_t uvec_segs)
+{
+ int ret, nr_segs;
+ struct iovec *iov;
+
+ if (io->vec.iovec) {
+ nr_segs = io->vec.nr;
+ iov = io->vec.iovec;
+ } else {
+ nr_segs = 1;
+ iov = &io->fast_iov;
+ }
+
+ ret = __import_iovec(ddir, uvec, uvec_segs, nr_segs, &iov, &io->iter,
+ io_is_compat(req->ctx));
+ if (unlikely(ret < 0))
+ return ret;
+ if (iov) {
+ req->flags |= REQ_F_NEED_CLEANUP;
+ io_vec_reset_iovec(&io->vec, iov, io->iter.nr_segs);
+ }
+ return 0;
+}
+
+static int __io_import_rw_buffer(int ddir, struct io_kiocb *req,
+ struct io_async_rw *io, struct io_br_sel *sel,
+ unsigned int issue_flags)
+{
+ const struct io_issue_def *def = &io_issue_defs[req->opcode];
+ struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
+ size_t sqe_len = rw->len;
+
+ sel->addr = u64_to_user_ptr(rw->addr);
+ if (def->vectored && !(req->flags & REQ_F_BUFFER_SELECT))
+ return io_import_vec(ddir, req, io, sel->addr, sqe_len);
+
+ if (io_do_buffer_select(req)) {
+ *sel = io_buffer_select(req, &sqe_len, io->buf_group, issue_flags);
+ if (!sel->addr)
+ return -ENOBUFS;
+ rw->addr = (unsigned long) sel->addr;
+ rw->len = sqe_len;
+ }
+ return import_ubuf(ddir, sel->addr, sqe_len, &io->iter);
+}
+
+static inline int io_import_rw_buffer(int rw, struct io_kiocb *req,
+ struct io_async_rw *io,
+ struct io_br_sel *sel,
+ unsigned int issue_flags)
+{
+ int ret;
+
+ ret = __io_import_rw_buffer(rw, req, io, sel, issue_flags);
+ if (unlikely(ret < 0))
+ return ret;
+
+ iov_iter_save_state(&io->iter, &io->iter_state);
+ return 0;
+}
+
+static void io_rw_recycle(struct io_kiocb *req, unsigned int issue_flags)
+{
+ struct io_async_rw *rw = req->async_data;
+
+ if (unlikely(issue_flags & IO_URING_F_UNLOCKED))
+ return;
+
+ io_alloc_cache_vec_kasan(&rw->vec);
+ if (rw->vec.nr > IO_VEC_CACHE_SOFT_CAP)
+ io_vec_free(&rw->vec);
+
+ if (io_alloc_cache_put(&req->ctx->rw_cache, rw))
+ io_req_async_data_clear(req, 0);
+}
+
+static void io_req_rw_cleanup(struct io_kiocb *req, unsigned int issue_flags)
+{
+ /*
+ * Disable quick recycling for anything that's gone through io-wq.
+ * In theory, this should be fine to clean up. However, some read or
+ * write iter handling touches the iovec AFTER having called into the
+ * handler, e.g. to reexpand or revert. This means we can have:
+ *
+ * task io-wq
+ * issue
+ * punt to io-wq
+ * issue
+ * blkdev_write_iter()
+ * ->ki_complete()
+ * io_complete_rw()
+ * queue tw complete
+ * run tw
+ * req_rw_cleanup
+ * iov_iter_count() <- look at iov_iter again
+ *
+ * which can lead to a UAF. This is only possible for io-wq offload
+ * as the cleanup can run in parallel. As io-wq is not the fast path,
+ * just leave cleanup to the end.
+ *
+ * This is really a bug in the core code that does this, any issue
+ * path should assume that a successful (or -EIOCBQUEUED) return can
+ * mean that the underlying data can be gone at any time. But that
+ * should be fixed separately, and then this check could be killed.
+ */
+ if (!(req->flags & (REQ_F_REISSUE | REQ_F_REFCOUNT))) {
+ req->flags &= ~REQ_F_NEED_CLEANUP;
+ io_rw_recycle(req, issue_flags);
+ }
+}
+
+static int io_rw_alloc_async(struct io_kiocb *req)
+{
+ struct io_ring_ctx *ctx = req->ctx;
+ struct io_async_rw *rw;
+
+ rw = io_uring_alloc_async_data(&ctx->rw_cache, req);
+ if (!rw)
+ return -ENOMEM;
+ if (rw->vec.iovec)
+ req->flags |= REQ_F_NEED_CLEANUP;
+ rw->bytes_done = 0;
+ return 0;
+}
+
+static inline void io_meta_save_state(struct io_async_rw *io)
+{
+ io->meta_state.seed = io->meta.seed;
+ iov_iter_save_state(&io->meta.iter, &io->meta_state.iter_meta);
+}
+
+static inline void io_meta_restore(struct io_async_rw *io, struct kiocb *kiocb)
+{
+ if (kiocb->ki_flags & IOCB_HAS_METADATA) {
+ io->meta.seed = io->meta_state.seed;
+ iov_iter_restore(&io->meta.iter, &io->meta_state.iter_meta);
+ }
+}
+
+static int io_prep_rw_pi(struct io_kiocb *req, struct io_rw *rw, int ddir,
+ u64 attr_ptr, u64 attr_type_mask)
+{
+ struct io_uring_attr_pi pi_attr;
+ struct io_async_rw *io;
+ int ret;
+
+ if (copy_from_user(&pi_attr, u64_to_user_ptr(attr_ptr),
+ sizeof(pi_attr)))
+ return -EFAULT;
+
+ if (pi_attr.rsvd)
+ return -EINVAL;
+
+ io = req->async_data;
+ io->meta.flags = pi_attr.flags;
+ io->meta.app_tag = pi_attr.app_tag;
+ io->meta.seed = pi_attr.seed;
+ ret = import_ubuf(ddir, u64_to_user_ptr(pi_attr.addr),
+ pi_attr.len, &io->meta.iter);
+ if (unlikely(ret < 0))
+ return ret;
+ req->flags |= REQ_F_HAS_METADATA;
+ io_meta_save_state(io);
+ return ret;
+}
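/*
 * Userspace sketch (not part of the patch; field names assumed from the uapi
 * struct io_uring_attr_pi consumed above): attaching a protection-information
 * buffer to a read or write SQE via the PI attribute.
 */
#include <string.h>
#include <stdint.h>
#include <linux/io_uring.h>

static void example_attach_pi(struct io_uring_sqe *sqe,
			      struct io_uring_attr_pi *pi,
			      void *pi_buf, __u32 pi_len)
{
	memset(pi, 0, sizeof(*pi));	/* rsvd must stay zero */
	pi->addr = (__u64)(uintptr_t)pi_buf;
	pi->len = pi_len;

	sqe->attr_type_mask = IORING_RW_ATTR_FLAG_PI;
	sqe->attr_ptr = (__u64)(uintptr_t)pi;
}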
+
+static int __io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+ int ddir)
{
struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
+ struct io_async_rw *io;
unsigned ioprio;
+ u64 attr_type_mask;
int ret;
+ if (io_rw_alloc_async(req))
+ return -ENOMEM;
+ io = req->async_data;
+
rw->kiocb.ki_pos = READ_ONCE(sqe->off);
/* used for fixed read/write too - just read unconditionally */
req->buf_index = READ_ONCE(sqe->buf_index);
-
- if (req->opcode == IORING_OP_READ_FIXED ||
- req->opcode == IORING_OP_WRITE_FIXED) {
- struct io_ring_ctx *ctx = req->ctx;
- u16 index;
-
- if (unlikely(req->buf_index >= ctx->nr_user_bufs))
- return -EFAULT;
- index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
- req->imu = ctx->user_bufs[index];
- io_req_set_rsrc_node(req, ctx, 0);
- }
+ io->buf_group = req->buf_index;
ioprio = READ_ONCE(sqe->ioprio);
if (ioprio) {
@@ -105,49 +277,196 @@ int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
} else {
rw->kiocb.ki_ioprio = get_current_ioprio();
}
+ rw->kiocb.ki_flags = 0;
+ rw->kiocb.ki_write_stream = READ_ONCE(sqe->write_stream);
+
+ if (req->ctx->flags & IORING_SETUP_IOPOLL)
+ rw->kiocb.ki_complete = io_complete_rw_iopoll;
+ else
+ rw->kiocb.ki_complete = io_complete_rw;
rw->addr = READ_ONCE(sqe->addr);
rw->len = READ_ONCE(sqe->len);
- rw->flags = READ_ONCE(sqe->rw_flags);
+ rw->flags = (__force rwf_t) READ_ONCE(sqe->rw_flags);
- /* Have to do this validation here, as this is in io_read() rw->len might
- * have chanaged due to buffer selection
- */
- if (req->opcode == IORING_OP_READV && req->flags & REQ_F_BUFFER_SELECT) {
- ret = io_iov_buffer_select_prep(req);
- if (ret)
- return ret;
+ attr_type_mask = READ_ONCE(sqe->attr_type_mask);
+ if (attr_type_mask) {
+ u64 attr_ptr;
+
+ /* only PI attribute is supported currently */
+ if (attr_type_mask != IORING_RW_ATTR_FLAG_PI)
+ return -EINVAL;
+
+ attr_ptr = READ_ONCE(sqe->attr_ptr);
+ return io_prep_rw_pi(req, rw, ddir, attr_ptr, attr_type_mask);
}
+ return 0;
+}
+
+static int io_rw_do_import(struct io_kiocb *req, int ddir)
+{
+ struct io_br_sel sel = { };
+
+ if (io_do_buffer_select(req))
+ return 0;
+
+ return io_import_rw_buffer(ddir, req, req->async_data, &sel, 0);
+}
+
+static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+ int ddir)
+{
+ int ret;
+
+ ret = __io_prep_rw(req, sqe, ddir);
+ if (unlikely(ret))
+ return ret;
+
+ return io_rw_do_import(req, ddir);
+}
+
+int io_prep_read(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+ return io_prep_rw(req, sqe, ITER_DEST);
+}
+
+int io_prep_write(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+ return io_prep_rw(req, sqe, ITER_SOURCE);
+}
+
+static int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+ int ddir)
+{
+ int ret;
+
+ ret = io_prep_rw(req, sqe, ddir);
+ if (unlikely(ret))
+ return ret;
+ if (!(req->flags & REQ_F_BUFFER_SELECT))
+ return 0;
+
+ /*
+ * Have to do this validation here, as by the time we are in io_read()
+ * rw->len might have changed due to buffer selection
+ */
+ return io_iov_buffer_select_prep(req);
+}
+
+int io_prep_readv(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+ return io_prep_rwv(req, sqe, ITER_DEST);
+}
+
+int io_prep_writev(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+ return io_prep_rwv(req, sqe, ITER_SOURCE);
+}
+
+static int io_init_rw_fixed(struct io_kiocb *req, unsigned int issue_flags,
+ int ddir)
+{
+ struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
+ struct io_async_rw *io = req->async_data;
+ int ret;
+
+ if (io->bytes_done)
+ return 0;
+
+ ret = io_import_reg_buf(req, &io->iter, rw->addr, rw->len, ddir,
+ issue_flags);
+ iov_iter_save_state(&io->iter, &io->iter_state);
+ return ret;
+}
+int io_prep_read_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+ return __io_prep_rw(req, sqe, ITER_DEST);
+}
+
+int io_prep_write_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+ return __io_prep_rw(req, sqe, ITER_SOURCE);
+}
+
+static int io_rw_import_reg_vec(struct io_kiocb *req,
+ struct io_async_rw *io,
+ int ddir, unsigned int issue_flags)
+{
+ struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
+ unsigned uvec_segs = rw->len;
+ int ret;
+
+ ret = io_import_reg_vec(ddir, &io->iter, req, &io->vec,
+ uvec_segs, issue_flags);
+ if (unlikely(ret))
+ return ret;
+ iov_iter_save_state(&io->iter, &io->iter_state);
+ req->flags &= ~REQ_F_IMPORT_BUFFER;
return 0;
}
-void io_readv_writev_cleanup(struct io_kiocb *req)
+static int io_rw_prep_reg_vec(struct io_kiocb *req)
{
+ struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
struct io_async_rw *io = req->async_data;
+ const struct iovec __user *uvec;
- kfree(io->free_iovec);
+ uvec = u64_to_user_ptr(rw->addr);
+ return io_prep_reg_iovec(req, &io->vec, uvec, rw->len);
}
-static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
+int io_prep_readv_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- switch (ret) {
- case -EIOCBQUEUED:
- break;
- case -ERESTARTSYS:
- case -ERESTARTNOINTR:
- case -ERESTARTNOHAND:
- case -ERESTART_RESTARTBLOCK:
- /*
- * We can't just restart the syscall, since previously
- * submitted sqes may already be in progress. Just fail this
- * IO with EINTR.
- */
- ret = -EINTR;
- fallthrough;
- default:
- kiocb->ki_complete(kiocb, ret);
- }
+ int ret;
+
+ ret = __io_prep_rw(req, sqe, ITER_DEST);
+ if (unlikely(ret))
+ return ret;
+ return io_rw_prep_reg_vec(req);
+}
+
+int io_prep_writev_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+ int ret;
+
+ ret = __io_prep_rw(req, sqe, ITER_SOURCE);
+ if (unlikely(ret))
+ return ret;
+ return io_rw_prep_reg_vec(req);
+}
+
+/*
+ * Multishot read is prepared just like a normal read/write request, only
+ * Multishot read is prepared just like a normal read/write request; the
+ * only difference is that we set the MULTISHOT flag.
+int io_read_mshot_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+ struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
+ int ret;
+
+ /* must be used with provided buffers */
+ if (!(req->flags & REQ_F_BUFFER_SELECT))
+ return -EINVAL;
+
+ ret = __io_prep_rw(req, sqe, ITER_DEST);
+ if (unlikely(ret))
+ return ret;
+
+ if (rw->addr || rw->len)
+ return -EINVAL;
+
+ req->flags |= REQ_F_APOLL_MULTISHOT;
+ return 0;
+}
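/*
 * Userspace sketch (not part of the patch): arming a multishot read. Per
 * io_read_mshot_prep() above, addr/len stay zero and the request must use
 * provided buffers, so IOSQE_BUFFER_SELECT plus a buffer group id is
 * mandatory; group id 7 below is arbitrary.
 */
#include <string.h>
#include <linux/io_uring.h>

static void example_prep_read_multishot(struct io_uring_sqe *sqe, int fd)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_READ_MULTISHOT;
	sqe->fd = fd;
	sqe->flags = IOSQE_BUFFER_SELECT;
	sqe->buf_group = 7;
}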
+
+void io_readv_writev_cleanup(struct io_kiocb *req)
+{
+ struct io_async_rw *rw = req->async_data;
+
+ lockdep_assert_held(&req->ctx->uring_lock);
+ io_vec_free(&rw->vec);
+ io_rw_recycle(req, 0);
}
static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
@@ -167,26 +486,12 @@ static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
return NULL;
}
-static void io_req_task_queue_reissue(struct io_kiocb *req)
-{
- req->io_task_work.func = io_queue_iowq;
- io_req_task_work_add(req);
-}
-
-#ifdef CONFIG_BLOCK
-static bool io_resubmit_prep(struct io_kiocb *req)
-{
- struct io_async_rw *io = req->async_data;
-
- if (!req_has_async_data(req))
- return !io_req_prep_async(req);
- iov_iter_restore(&io->s.iter, &io->s.iter_state);
- return true;
-}
-
static bool io_rw_should_reissue(struct io_kiocb *req)
{
+#ifdef CONFIG_BLOCK
+ struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
umode_t mode = file_inode(req->file)->i_mode;
+ struct io_async_rw *io = req->async_data;
struct io_ring_ctx *ctx = req->ctx;
if (!S_ISBLK(mode) && !S_ISREG(mode))
@@ -201,36 +506,21 @@ static bool io_rw_should_reissue(struct io_kiocb *req)
*/
if (percpu_ref_is_dying(&ctx->refs))
return false;
- /*
- * Play it safe and assume not safe to re-import and reissue if we're
- * not in the original thread group (or in task context).
- */
- if (!same_thread_group(req->task, current) || !in_task())
- return false;
+
+ io_meta_restore(io, &rw->kiocb);
+ iov_iter_restore(&io->iter, &io->iter_state);
return true;
-}
#else
-static bool io_resubmit_prep(struct io_kiocb *req)
-{
- return false;
-}
-static bool io_rw_should_reissue(struct io_kiocb *req)
-{
return false;
-}
#endif
+}
-static void kiocb_end_write(struct io_kiocb *req)
+static void io_req_end_write(struct io_kiocb *req)
{
- /*
- * Tell lockdep we inherited freeze protection from submission
- * thread.
- */
if (req->flags & REQ_F_ISREG) {
- struct super_block *sb = file_inode(req->file)->i_sb;
+ struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
- __sb_writers_acquired(sb, SB_FREEZE_WRITE);
- sb_end_write(sb);
+ kiocb_end_write(&rw->kiocb);
}
}
@@ -243,30 +533,23 @@ static void io_req_io_end(struct io_kiocb *req)
struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
if (rw->kiocb.ki_flags & IOCB_WRITE) {
- kiocb_end_write(req);
+ io_req_end_write(req);
fsnotify_modify(req->file);
} else {
fsnotify_access(req->file);
}
}
-static bool __io_complete_rw_common(struct io_kiocb *req, long res)
+static void __io_complete_rw_common(struct io_kiocb *req, long res)
{
- if (unlikely(res != req->cqe.res)) {
- if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
- io_rw_should_reissue(req)) {
- /*
- * Reissue will start accounting again, finish the
- * current cycle.
- */
- io_req_io_end(req);
- req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
- return true;
- }
+ if (res == req->cqe.res)
+ return;
+ if ((res == -EOPNOTSUPP || res == -EAGAIN) && io_rw_should_reissue(req)) {
+ req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE;
+ } else {
req_set_fail(req);
req->cqe.res = res;
}
- return false;
}
static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
@@ -283,16 +566,17 @@ static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
return res;
}
-static void io_req_rw_complete(struct io_kiocb *req, bool *locked)
+void io_req_rw_complete(struct io_tw_req tw_req, io_tw_token_t tw)
{
+ struct io_kiocb *req = tw_req.req;
+
io_req_io_end(req);
- if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) {
- unsigned issue_flags = *locked ? 0 : IO_URING_F_UNLOCKED;
+ if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING))
+ req->cqe.flags |= io_put_kbuf(req, req->cqe.res, NULL);
- req->cqe.flags |= io_put_kbuf(req, issue_flags);
- }
- io_req_task_complete(req, locked);
+ io_req_rw_cleanup(req, 0);
+ io_req_task_complete(tw_req, tw);
}
static void io_complete_rw(struct kiocb *kiocb, long res)
@@ -300,11 +584,10 @@ static void io_complete_rw(struct kiocb *kiocb, long res)
struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
struct io_kiocb *req = cmd_to_io_kiocb(rw);
- if (__io_complete_rw_common(req, res))
- return;
+ __io_complete_rw_common(req, res);
io_req_set_res(req, io_fixup_rw_res(req, res), 0);
req->io_task_work.func = io_req_rw_complete;
- io_req_task_work_add(req);
+ __io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE);
}
static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
@@ -313,108 +596,76 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
struct io_kiocb *req = cmd_to_io_kiocb(rw);
if (kiocb->ki_flags & IOCB_WRITE)
- kiocb_end_write(req);
+ io_req_end_write(req);
if (unlikely(res != req->cqe.res)) {
- if (res == -EAGAIN && io_rw_should_reissue(req)) {
- req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
- return;
- }
- req->cqe.res = res;
+ if (res == -EAGAIN && io_rw_should_reissue(req))
+ req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE;
+ else
+ req->cqe.res = res;
}
/* order with io_iopoll_complete() checking ->iopoll_completed */
smp_store_release(&req->iopoll_completed, 1);
}
-static int kiocb_done(struct io_kiocb *req, ssize_t ret,
- unsigned int issue_flags)
+static inline void io_rw_done(struct io_kiocb *req, ssize_t ret)
{
struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
- unsigned final_ret = io_fixup_rw_res(req, ret);
- if (req->flags & REQ_F_CUR_POS)
- req->file->f_pos = rw->kiocb.ki_pos;
- if (ret >= 0 && (rw->kiocb.ki_complete == io_complete_rw)) {
- if (!__io_complete_rw_common(req, ret)) {
+ /* IO was queued async, completion will happen later */
+ if (ret == -EIOCBQUEUED)
+ return;
+
+ /* transform internal restart error codes */
+ if (unlikely(ret < 0)) {
+ switch (ret) {
+ case -ERESTARTSYS:
+ case -ERESTARTNOINTR:
+ case -ERESTARTNOHAND:
+ case -ERESTART_RESTARTBLOCK:
/*
- * Safe to call io_end from here as we're inline
- * from the submission path.
+ * We can't just restart the syscall, since previously
+ * submitted sqes may already be in progress. Just fail
+ * this IO with EINTR.
*/
- io_req_io_end(req);
- io_req_set_res(req, final_ret,
- io_put_kbuf(req, issue_flags));
- return IOU_OK;
+ ret = -EINTR;
+ break;
}
- } else {
- io_rw_done(&rw->kiocb, ret);
}
- if (req->flags & REQ_F_REISSUE) {
- req->flags &= ~REQ_F_REISSUE;
- if (io_resubmit_prep(req))
- io_req_task_queue_reissue(req);
- else
- io_req_task_queue_fail(req, final_ret);
- }
- return IOU_ISSUE_SKIP_COMPLETE;
+ if (req->ctx->flags & IORING_SETUP_IOPOLL)
+ io_complete_rw_iopoll(&rw->kiocb, ret);
+ else
+ io_complete_rw(&rw->kiocb, ret);
}
-static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req,
- struct io_rw_state *s,
- unsigned int issue_flags)
+static int kiocb_done(struct io_kiocb *req, ssize_t ret,
+ struct io_br_sel *sel, unsigned int issue_flags)
{
struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
- struct iov_iter *iter = &s->iter;
- u8 opcode = req->opcode;
- struct iovec *iovec;
- void __user *buf;
- size_t sqe_len;
- ssize_t ret;
-
- if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
- ret = io_import_fixed(ddir, iter, req->imu, rw->addr, rw->len);
- if (ret)
- return ERR_PTR(ret);
- return NULL;
- }
+ unsigned final_ret = io_fixup_rw_res(req, ret);
- buf = u64_to_user_ptr(rw->addr);
- sqe_len = rw->len;
-
- if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE ||
- (req->flags & REQ_F_BUFFER_SELECT)) {
- if (io_do_buffer_select(req)) {
- buf = io_buffer_select(req, &sqe_len, issue_flags);
- if (!buf)
- return ERR_PTR(-ENOBUFS);
- rw->addr = (unsigned long) buf;
- rw->len = sqe_len;
- }
+ if (ret >= 0 && req->flags & REQ_F_CUR_POS)
+ req->file->f_pos = rw->kiocb.ki_pos;
+ if (ret >= 0 && !(req->ctx->flags & IORING_SETUP_IOPOLL)) {
+ u32 cflags = 0;
- ret = import_single_range(ddir, buf, sqe_len, s->fast_iov, iter);
- if (ret)
- return ERR_PTR(ret);
- return NULL;
+ __io_complete_rw_common(req, ret);
+ /*
+ * Safe to call io_end from here as we're inline
+ * from the submission path.
+ */
+ io_req_io_end(req);
+ if (sel)
+ cflags = io_put_kbuf(req, ret, sel->buf_list);
+ io_req_set_res(req, final_ret, cflags);
+ io_req_rw_cleanup(req, issue_flags);
+ return IOU_COMPLETE;
+ } else {
+ io_rw_done(req, ret);
}
- iovec = s->fast_iov;
- ret = __import_iovec(ddir, buf, sqe_len, UIO_FASTIOV, &iovec, iter,
- req->ctx->compat);
- if (unlikely(ret < 0))
- return ERR_PTR(ret);
- return iovec;
-}
-
-static inline int io_import_iovec(int rw, struct io_kiocb *req,
- struct iovec **iovec, struct io_rw_state *s,
- unsigned int issue_flags)
-{
- *iovec = __io_import_iovec(rw, req, s, issue_flags);
- if (unlikely(IS_ERR(*iovec)))
- return PTR_ERR(*iovec);
-
- iov_iter_save_state(&s->iter, &s->iter_state);
- return 0;
+ return IOU_ISSUE_SKIP_COMPLETE;
}
static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
@@ -428,6 +679,7 @@ static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
*/
static ssize_t loop_rw_iter(int ddir, struct io_rw *rw, struct iov_iter *iter)
{
+ struct io_kiocb *req = cmd_to_io_kiocb(rw);
struct kiocb *kiocb = &rw->kiocb;
struct file *file = kiocb->ki_filp;
ssize_t ret = 0;
@@ -443,27 +695,31 @@ static ssize_t loop_rw_iter(int ddir, struct io_rw *rw, struct iov_iter *iter)
if ((kiocb->ki_flags & IOCB_NOWAIT) &&
!(kiocb->ki_filp->f_flags & O_NONBLOCK))
return -EAGAIN;
+ if ((req->flags & REQ_F_BUF_NODE) && req->buf_node->buf->is_kbuf)
+ return -EFAULT;
ppos = io_kiocb_ppos(kiocb);
while (iov_iter_count(iter)) {
- struct iovec iovec;
+ void __user *addr;
+ size_t len;
ssize_t nr;
- if (!iov_iter_is_bvec(iter)) {
- iovec = iov_iter_iovec(iter);
+ if (iter_is_ubuf(iter)) {
+ addr = iter->ubuf + iter->iov_offset;
+ len = iov_iter_count(iter);
+ } else if (!iov_iter_is_bvec(iter)) {
+ addr = iter_iov_addr(iter);
+ len = iter_iov_len(iter);
} else {
- iovec.iov_base = u64_to_user_ptr(rw->addr);
- iovec.iov_len = rw->len;
+ addr = u64_to_user_ptr(rw->addr);
+ len = rw->len;
}
- if (ddir == READ) {
- nr = file->f_op->read(file, iovec.iov_base,
- iovec.iov_len, ppos);
- } else {
- nr = file->f_op->write(file, iovec.iov_base,
- iovec.iov_len, ppos);
- }
+ if (ddir == READ)
+ nr = file->f_op->read(file, addr, len, ppos);
+ else
+ nr = file->f_op->write(file, addr, len, ppos);
if (nr < 0) {
if (!ret)
@@ -479,89 +735,13 @@ static ssize_t loop_rw_iter(int ddir, struct io_rw *rw, struct iov_iter *iter)
if (!rw->len)
break;
}
- if (nr != iovec.iov_len)
+ if (nr != len)
break;
}
return ret;
}
-static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
- const struct iovec *fast_iov, struct iov_iter *iter)
-{
- struct io_async_rw *io = req->async_data;
-
- memcpy(&io->s.iter, iter, sizeof(*iter));
- io->free_iovec = iovec;
- io->bytes_done = 0;
- /* can only be fixed buffers, no need to do anything */
- if (iov_iter_is_bvec(iter))
- return;
- if (!iovec) {
- unsigned iov_off = 0;
-
- io->s.iter.iov = io->s.fast_iov;
- if (iter->iov != fast_iov) {
- iov_off = iter->iov - fast_iov;
- io->s.iter.iov += iov_off;
- }
- if (io->s.fast_iov != fast_iov)
- memcpy(io->s.fast_iov + iov_off, fast_iov + iov_off,
- sizeof(struct iovec) * iter->nr_segs);
- } else {
- req->flags |= REQ_F_NEED_CLEANUP;
- }
-}
-
-static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
- struct io_rw_state *s, bool force)
-{
- if (!force && !io_op_defs[req->opcode].prep_async)
- return 0;
- if (!req_has_async_data(req)) {
- struct io_async_rw *iorw;
-
- if (io_alloc_async_data(req)) {
- kfree(iovec);
- return -ENOMEM;
- }
-
- io_req_map_rw(req, iovec, s->fast_iov, &s->iter);
- iorw = req->async_data;
- /* we've copied and mapped the iter, ensure state is saved */
- iov_iter_save_state(&iorw->s.iter, &iorw->s.iter_state);
- }
- return 0;
-}
-
-static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
-{
- struct io_async_rw *iorw = req->async_data;
- struct iovec *iov;
- int ret;
-
- /* submission path, ->uring_lock should already be taken */
- ret = io_import_iovec(rw, req, &iov, &iorw->s, 0);
- if (unlikely(ret < 0))
- return ret;
-
- iorw->bytes_done = 0;
- iorw->free_iovec = iov;
- if (iov)
- req->flags |= REQ_F_NEED_CLEANUP;
- return 0;
-}
-
-int io_readv_prep_async(struct io_kiocb *req)
-{
- return io_rw_prep_async(req, ITER_DEST);
-}
-
-int io_writev_prep_async(struct io_kiocb *req)
-{
- return io_rw_prep_async(req, ITER_SOURCE);
-}
-
/*
* This is our waitqueue callback handler, registered through __folio_lock_async()
* when we initially tried to do the IO with the iocb armed our waitqueue.
@@ -610,8 +790,11 @@ static bool io_rw_should_retry(struct io_kiocb *req)
struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
struct kiocb *kiocb = &rw->kiocb;
- /* never retry for NOWAIT, we just complete with -EAGAIN */
- if (req->flags & REQ_F_NOWAIT)
+ /*
+ * Never retry for NOWAIT or a request with metadata; we just complete
+ * with -EAGAIN.
+ */
+ if (req->flags & (REQ_F_NOWAIT | REQ_F_HAS_METADATA))
return false;
/* Only for buffered IO */
@@ -622,7 +805,8 @@ static bool io_rw_should_retry(struct io_kiocb *req)
* just use poll if we can, and don't attempt if the fs doesn't
* support callback based unlocks
*/
- if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
+ if (io_file_can_poll(req) ||
+ !(req->file->f_op->fop_flags & FOP_BUFFER_RASYNC))
return false;
wait->wait.func = io_async_buf_func;
@@ -640,7 +824,7 @@ static inline int io_iter_do_read(struct io_rw *rw, struct iov_iter *iter)
struct file *file = rw->kiocb.ki_filp;
if (likely(file->f_op->read_iter))
- return call_read_iter(file, &rw->kiocb, iter);
+ return file->f_op->read_iter(&rw->kiocb, iter);
else if (file->f_op->read)
return loop_rw_iter(READ, rw, iter);
else
@@ -653,7 +837,7 @@ static bool need_complete_io(struct io_kiocb *req)
S_ISBLK(file_inode(req->file)->i_mode);
}
-static int io_rw_init_file(struct io_kiocb *req, fmode_t mode)
+static int io_rw_init_file(struct io_kiocb *req, fmode_t mode, int rw_type)
{
struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
struct kiocb *kiocb = &rw->kiocb;
@@ -661,94 +845,89 @@ static int io_rw_init_file(struct io_kiocb *req, fmode_t mode)
struct file *file = req->file;
int ret;
- if (unlikely(!file || !(file->f_mode & mode)))
+ if (unlikely(!(file->f_mode & mode)))
return -EBADF;
- if (!io_req_ffs_set(req))
- req->flags |= io_file_get_flags(file) << REQ_F_SUPPORT_NOWAIT_BIT;
+ if (!(req->flags & REQ_F_FIXED_FILE))
+ req->flags |= io_file_get_flags(file);
kiocb->ki_flags = file->f_iocb_flags;
- ret = kiocb_set_rw_flags(kiocb, rw->flags);
+ ret = kiocb_set_rw_flags(kiocb, rw->flags, rw_type);
if (unlikely(ret))
return ret;
- kiocb->ki_flags |= IOCB_ALLOC_CACHE;
/*
* If the file is marked O_NONBLOCK, still allow retry for it if it
* supports async. Otherwise it's impossible to use O_NONBLOCK files
* reliably. If not, or it IOCB_NOWAIT is set, don't retry.
*/
- if ((kiocb->ki_flags & IOCB_NOWAIT) ||
- ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req)))
+ if (kiocb->ki_flags & IOCB_NOWAIT ||
+ ((file->f_flags & O_NONBLOCK && !(req->flags & REQ_F_SUPPORT_NOWAIT))))
req->flags |= REQ_F_NOWAIT;
if (ctx->flags & IORING_SETUP_IOPOLL) {
if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
return -EOPNOTSUPP;
-
kiocb->private = NULL;
kiocb->ki_flags |= IOCB_HIPRI;
- kiocb->ki_complete = io_complete_rw_iopoll;
req->iopoll_completed = 0;
+ if (ctx->flags & IORING_SETUP_HYBRID_IOPOLL) {
+ /* make sure every req only blocks once */
+ req->flags &= ~REQ_F_IOPOLL_STATE;
+ req->iopoll_start = ktime_get_ns();
+ }
} else {
if (kiocb->ki_flags & IOCB_HIPRI)
return -EINVAL;
- kiocb->ki_complete = io_complete_rw;
+ }
+
+ if (req->flags & REQ_F_HAS_METADATA) {
+ struct io_async_rw *io = req->async_data;
+
+ if (!(file->f_mode & FMODE_HAS_METADATA))
+ return -EINVAL;
+
+ /*
+ * We have a union of meta fields with wpq used for buffered-io
+ * in io_async_rw, so fail it here.
+ */
+ if (!(req->file->f_flags & O_DIRECT))
+ return -EOPNOTSUPP;
+ kiocb->ki_flags |= IOCB_HAS_METADATA;
+ kiocb->private = &io->meta;
}
return 0;
}
-int io_read(struct io_kiocb *req, unsigned int issue_flags)
+static int __io_read(struct io_kiocb *req, struct io_br_sel *sel,
+ unsigned int issue_flags)
{
+ bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
- struct io_rw_state __s, *s = &__s;
- struct iovec *iovec;
+ struct io_async_rw *io = req->async_data;
struct kiocb *kiocb = &rw->kiocb;
- bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
- struct io_async_rw *io;
- ssize_t ret, ret2;
+ ssize_t ret;
loff_t *ppos;
- if (!req_has_async_data(req)) {
- ret = io_import_iovec(ITER_DEST, req, &iovec, s, issue_flags);
+ if (req->flags & REQ_F_IMPORT_BUFFER) {
+ ret = io_rw_import_reg_vec(req, io, ITER_DEST, issue_flags);
+ if (unlikely(ret))
+ return ret;
+ } else if (io_do_buffer_select(req)) {
+ ret = io_import_rw_buffer(ITER_DEST, req, io, sel, issue_flags);
if (unlikely(ret < 0))
return ret;
- } else {
- io = req->async_data;
- s = &io->s;
-
- /*
- * Safe and required to re-import if we're using provided
- * buffers, as we dropped the selected one before retry.
- */
- if (io_do_buffer_select(req)) {
- ret = io_import_iovec(ITER_DEST, req, &iovec, s, issue_flags);
- if (unlikely(ret < 0))
- return ret;
- }
-
- /*
- * We come here from an earlier attempt, restore our state to
- * match in case it doesn't. It's cheap enough that we don't
- * need to make this conditional.
- */
- iov_iter_restore(&s->iter, &s->iter_state);
- iovec = NULL;
}
- ret = io_rw_init_file(req, FMODE_READ);
- if (unlikely(ret)) {
- kfree(iovec);
+ ret = io_rw_init_file(req, FMODE_READ, READ);
+ if (unlikely(ret))
return ret;
- }
- req->cqe.res = iov_iter_count(&s->iter);
+ req->cqe.res = iov_iter_count(&io->iter);
if (force_nonblock) {
/* If the file doesn't support async, just async punt */
- if (unlikely(!io_file_supports_nowait(req))) {
- ret = io_setup_async_rw(req, iovec, s, true);
- return ret ?: -EAGAIN;
- }
+ if (unlikely(!io_file_supports_nowait(req, EPOLLIN)))
+ return -EAGAIN;
kiocb->ki_flags |= IOCB_NOWAIT;
} else {
/* Ensure we clear previously set non-block flag */
@@ -758,17 +937,22 @@ int io_read(struct io_kiocb *req, unsigned int issue_flags)
ppos = io_kiocb_update_pos(req);
ret = rw_verify_area(READ, req->file, ppos, req->cqe.res);
- if (unlikely(ret)) {
- kfree(iovec);
+ if (unlikely(ret))
return ret;
- }
- ret = io_iter_do_read(rw, &s->iter);
+ ret = io_iter_do_read(rw, &io->iter);
- if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
- req->flags &= ~REQ_F_REISSUE;
- /* if we can poll, just do that */
- if (req->opcode == IORING_OP_READ && file_can_poll(req->file))
+ /*
+ * Some file systems like to return -EOPNOTSUPP for an IOCB_NOWAIT
+ * issue, even though they should be returning -EAGAIN. To be safe,
+ * retry from blocking context for either.
+ */
+ if (ret == -EOPNOTSUPP && force_nonblock)
+ ret = -EAGAIN;
+
+ if (ret == -EAGAIN) {
+ /* If we can poll, just do that. */
+ if (io_file_can_poll(req))
return -EAGAIN;
/* IOPOLL retry should happen for io-wq threads */
if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
@@ -778,11 +962,10 @@ int io_read(struct io_kiocb *req, unsigned int issue_flags)
goto done;
ret = 0;
} else if (ret == -EIOCBQUEUED) {
- if (iovec)
- kfree(iovec);
return IOU_ISSUE_SKIP_COMPLETE;
} else if (ret == req->cqe.res || ret <= 0 || !force_nonblock ||
- (req->flags & REQ_F_NOWAIT) || !need_complete_io(req)) {
+ (req->flags & REQ_F_NOWAIT) || !need_complete_io(req) ||
+ (issue_flags & IO_URING_F_MULTISHOT)) {
/* read all, failed, already did sync or don't want to retry */
goto done;
}
@@ -792,21 +975,8 @@ int io_read(struct io_kiocb *req, unsigned int issue_flags)
* untouched in case of error. Restore it and we'll advance it
* manually if we need to.
*/
- iov_iter_restore(&s->iter, &s->iter_state);
-
- ret2 = io_setup_async_rw(req, iovec, s, true);
- iovec = NULL;
- if (ret2) {
- ret = ret > 0 ? ret : ret2;
- goto done;
- }
-
- io = req->async_data;
- s = &io->s;
- /*
- * Now use our persistent iterator and state, if we aren't already.
- * We've restored and mapped the iter to match.
- */
+ iov_iter_restore(&io->iter, &io->iter_state);
+ io_meta_restore(io, kiocb);
do {
/*
@@ -814,11 +984,11 @@ int io_read(struct io_kiocb *req, unsigned int issue_flags)
* above or inside this loop. Advance the iter by the bytes
* that were consumed.
*/
- iov_iter_advance(&s->iter, ret);
- if (!iov_iter_count(&s->iter))
+ iov_iter_advance(&io->iter, ret);
+ if (!iov_iter_count(&io->iter))
break;
io->bytes_done += ret;
- iov_iter_save_state(&s->iter, &s->iter_state);
+ iov_iter_save_state(&io->iter, &io->iter_state);
/* if we can retry, do so with the callbacks armed */
if (!io_rw_should_retry(req)) {
@@ -826,65 +996,155 @@ int io_read(struct io_kiocb *req, unsigned int issue_flags)
return -EAGAIN;
}
- req->cqe.res = iov_iter_count(&s->iter);
+ req->cqe.res = iov_iter_count(&io->iter);
/*
* Now retry read with the IOCB_WAITQ parts set in the iocb. If
* we get -EIOCBQUEUED, then we'll get a notification when the
* desired page gets unlocked. We can also get a partial read
* here, and if we do, then just retry at the new offset.
*/
- ret = io_iter_do_read(rw, &s->iter);
+ ret = io_iter_do_read(rw, &io->iter);
if (ret == -EIOCBQUEUED)
return IOU_ISSUE_SKIP_COMPLETE;
/* we got some bytes, but not all. retry. */
kiocb->ki_flags &= ~IOCB_WAITQ;
- iov_iter_restore(&s->iter, &s->iter_state);
+ iov_iter_restore(&io->iter, &io->iter_state);
} while (ret > 0);
done:
- /* it's faster to check here then delegate to kfree */
- if (iovec)
- kfree(iovec);
- return kiocb_done(req, ret, issue_flags);
+ return ret;
+}
+
+int io_read(struct io_kiocb *req, unsigned int issue_flags)
+{
+ struct io_br_sel sel = { };
+ int ret;
+
+ ret = __io_read(req, &sel, issue_flags);
+ if (ret >= 0)
+ return kiocb_done(req, ret, &sel, issue_flags);
+
+ if (req->flags & REQ_F_BUFFERS_COMMIT)
+ io_kbuf_recycle(req, sel.buf_list, issue_flags);
+ return ret;
+}
+
+int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
+{
+ struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
+ struct io_br_sel sel = { };
+ unsigned int cflags = 0;
+ int ret;
+
+ /*
+ * Multishot MUST be used on a pollable file
+ */
+ if (!io_file_can_poll(req))
+ return -EBADFD;
+
+ /* make it sync, multishot doesn't support async execution */
+ rw->kiocb.ki_complete = NULL;
+ ret = __io_read(req, &sel, issue_flags);
+
+ /*
+ * If we get -EAGAIN, recycle our buffer and just let normal poll
+ * handling arm it.
+ */
+ if (ret == -EAGAIN) {
+ /*
+ * Reset rw->len to 0 again to avoid clamping future mshot
+ * reads, in case the buffer size varies.
+ */
+ if (io_kbuf_recycle(req, sel.buf_list, issue_flags))
+ rw->len = 0;
+ return IOU_RETRY;
+ } else if (ret <= 0) {
+ io_kbuf_recycle(req, sel.buf_list, issue_flags);
+ if (ret < 0)
+ req_set_fail(req);
+ } else if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
+ cflags = io_put_kbuf(req, ret, sel.buf_list);
+ } else {
+ /*
+ * Any successful return value will keep the multishot read
+ * armed, if it's still set. Put our buffer and post a CQE. If
+ * we fail to post a CQE, or multishot is no longer set, then
+ * jump to the termination path. This request is then done.
+ */
+ cflags = io_put_kbuf(req, ret, sel.buf_list);
+ rw->len = 0; /* similarly to above, reset len to 0 */
+
+ if (io_req_post_cqe(req, ret, cflags | IORING_CQE_F_MORE)) {
+ if (issue_flags & IO_URING_F_MULTISHOT)
+ /*
+ * Force retry, as we might have more data to
+ * be read and otherwise it won't get retried
+ * until (if ever) another poll is triggered.
+ */
+ io_poll_multishot_retry(req);
+
+ return IOU_RETRY;
+ }
+ }
+
+ /*
+ * Either an error, or we've hit overflow posting the CQE. For any
+ * multishot request, hitting overflow will terminate it.
+ */
+ io_req_set_res(req, ret, cflags);
+ io_req_rw_cleanup(req, issue_flags);
+ return IOU_COMPLETE;
+}
+
+static bool io_kiocb_start_write(struct io_kiocb *req, struct kiocb *kiocb)
+{
+ struct inode *inode;
+ bool ret;
+
+ if (!(req->flags & REQ_F_ISREG))
+ return true;
+ if (!(kiocb->ki_flags & IOCB_NOWAIT)) {
+ kiocb_start_write(kiocb);
+ return true;
+ }
+
+ inode = file_inode(kiocb->ki_filp);
+ ret = sb_start_write_trylock(inode->i_sb);
+ if (ret)
+ __sb_writers_release(inode->i_sb, SB_FREEZE_WRITE);
+ return ret;
}
int io_write(struct io_kiocb *req, unsigned int issue_flags)
{
+ bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
- struct io_rw_state __s, *s = &__s;
- struct iovec *iovec;
+ struct io_async_rw *io = req->async_data;
struct kiocb *kiocb = &rw->kiocb;
- bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
ssize_t ret, ret2;
loff_t *ppos;
- if (!req_has_async_data(req)) {
- ret = io_import_iovec(ITER_SOURCE, req, &iovec, s, issue_flags);
- if (unlikely(ret < 0))
+ if (req->flags & REQ_F_IMPORT_BUFFER) {
+ ret = io_rw_import_reg_vec(req, io, ITER_SOURCE, issue_flags);
+ if (unlikely(ret))
return ret;
- } else {
- struct io_async_rw *io = req->async_data;
-
- s = &io->s;
- iov_iter_restore(&s->iter, &s->iter_state);
- iovec = NULL;
}
- ret = io_rw_init_file(req, FMODE_WRITE);
- if (unlikely(ret)) {
- kfree(iovec);
+
+ ret = io_rw_init_file(req, FMODE_WRITE, WRITE);
+ if (unlikely(ret))
return ret;
- }
- req->cqe.res = iov_iter_count(&s->iter);
+ req->cqe.res = iov_iter_count(&io->iter);
if (force_nonblock) {
/* If the file doesn't support async, just async punt */
- if (unlikely(!io_file_supports_nowait(req)))
- goto copy_iov;
+ if (unlikely(!io_file_supports_nowait(req, EPOLLOUT)))
+ goto ret_eagain;
- /* File path supports NOWAIT for non-direct_IO only for block devices. */
+ /* Check if we can support NOWAIT. */
if (!(kiocb->ki_flags & IOCB_DIRECT) &&
- !(kiocb->ki_filp->f_mode & FMODE_BUF_WASYNC) &&
- (req->flags & REQ_F_ISREG))
- goto copy_iov;
+ !(req->file->f_op->fop_flags & FOP_BUFFER_WASYNC) &&
+ (req->flags & REQ_F_ISREG))
+ goto ret_eagain;
kiocb->ki_flags |= IOCB_NOWAIT;
} else {
@@ -895,37 +1155,20 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags)
ppos = io_kiocb_update_pos(req);
ret = rw_verify_area(WRITE, req->file, ppos, req->cqe.res);
- if (unlikely(ret)) {
- kfree(iovec);
+ if (unlikely(ret))
return ret;
- }
- /*
- * Open-code file_start_write here to grab freeze protection,
- * which will be released by another thread in
- * io_complete_rw(). Fool lockdep by telling it the lock got
- * released so that it doesn't complain about the held lock when
- * we return to userspace.
- */
- if (req->flags & REQ_F_ISREG) {
- sb_start_write(file_inode(req->file)->i_sb);
- __sb_writers_release(file_inode(req->file)->i_sb,
- SB_FREEZE_WRITE);
- }
+ if (unlikely(!io_kiocb_start_write(req, kiocb)))
+ return -EAGAIN;
kiocb->ki_flags |= IOCB_WRITE;
if (likely(req->file->f_op->write_iter))
- ret2 = call_write_iter(req->file, kiocb, &s->iter);
+ ret2 = req->file->f_op->write_iter(kiocb, &io->iter);
else if (req->file->f_op->write)
- ret2 = loop_rw_iter(WRITE, rw, &s->iter);
+ ret2 = loop_rw_iter(WRITE, rw, &io->iter);
else
ret2 = -EINVAL;
- if (req->flags & REQ_F_REISSUE) {
- req->flags &= ~REQ_F_REISSUE;
- ret2 = -EAGAIN;
- }
-
/*
* Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
* retry them without IOCB_NOWAIT.
@@ -938,11 +1181,9 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags)
if (!force_nonblock || ret2 != -EAGAIN) {
/* IOPOLL retry should happen for io-wq threads */
if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL))
- goto copy_iov;
+ goto ret_eagain;
if (ret2 != req->cqe.res && ret2 >= 0 && need_complete_io(req)) {
- struct io_async_rw *io;
-
trace_io_uring_short_write(req->ctx, kiocb->ki_pos - ret2,
req->cqe.res, ret2);
@@ -951,41 +1192,45 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags)
* in the worker. Also update bytes_done to account for
* the bytes already written.
*/
- iov_iter_save_state(&s->iter, &s->iter_state);
- ret = io_setup_async_rw(req, iovec, s, true);
-
- io = req->async_data;
- if (io)
- io->bytes_done += ret2;
+ iov_iter_save_state(&io->iter, &io->iter_state);
+ io->bytes_done += ret2;
if (kiocb->ki_flags & IOCB_WRITE)
- kiocb_end_write(req);
- return ret ? ret : -EAGAIN;
+ io_req_end_write(req);
+ return -EAGAIN;
}
done:
- ret = kiocb_done(req, ret2, issue_flags);
+ return kiocb_done(req, ret2, NULL, issue_flags);
} else {
-copy_iov:
- iov_iter_restore(&s->iter, &s->iter_state);
- ret = io_setup_async_rw(req, iovec, s, false);
- if (!ret) {
- if (kiocb->ki_flags & IOCB_WRITE)
- kiocb_end_write(req);
- return -EAGAIN;
- }
- return ret;
+ret_eagain:
+ iov_iter_restore(&io->iter, &io->iter_state);
+ io_meta_restore(io, kiocb);
+ if (kiocb->ki_flags & IOCB_WRITE)
+ io_req_end_write(req);
+ return -EAGAIN;
}
- /* it's reportedly faster than delegating the null check to kfree() */
- if (iovec)
- kfree(iovec);
- return ret;
}
-static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
+int io_read_fixed(struct io_kiocb *req, unsigned int issue_flags)
{
- io_commit_cqring_flush(ctx);
- if (ctx->flags & IORING_SETUP_SQPOLL)
- io_cqring_wake(ctx);
+ int ret;
+
+ ret = io_init_rw_fixed(req, issue_flags, ITER_DEST);
+ if (unlikely(ret))
+ return ret;
+
+ return io_read(req, issue_flags);
+}
+
+int io_write_fixed(struct io_kiocb *req, unsigned int issue_flags)
+{
+ int ret;
+
+ ret = io_init_rw_fixed(req, issue_flags, ITER_SOURCE);
+ if (unlikely(ret))
+ return ret;
+
+ return io_write(req, issue_flags);
}
void io_rw_fail(struct io_kiocb *req)
@@ -996,10 +1241,82 @@ void io_rw_fail(struct io_kiocb *req)
io_req_set_res(req, res, req->cqe.flags);
}
+static int io_uring_classic_poll(struct io_kiocb *req, struct io_comp_batch *iob,
+ unsigned int poll_flags)
+{
+ struct file *file = req->file;
+
+ if (req->opcode == IORING_OP_URING_CMD) {
+ struct io_uring_cmd *ioucmd;
+
+ ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
+ return file->f_op->uring_cmd_iopoll(ioucmd, iob, poll_flags);
+ } else {
+ struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
+
+ return file->f_op->iopoll(&rw->kiocb, iob, poll_flags);
+ }
+}
+
+static u64 io_hybrid_iopoll_delay(struct io_ring_ctx *ctx, struct io_kiocb *req)
+{
+ struct hrtimer_sleeper timer;
+ enum hrtimer_mode mode;
+ ktime_t kt;
+ u64 sleep_time;
+
+ if (req->flags & REQ_F_IOPOLL_STATE)
+ return 0;
+
+ if (ctx->hybrid_poll_time == LLONG_MAX)
+ return 0;
+
+ /* Sleep for half of the tracked completion time before polling */
+ sleep_time = ctx->hybrid_poll_time / 2;
+
+ kt = ktime_set(0, sleep_time);
+ req->flags |= REQ_F_IOPOLL_STATE;
+
+ mode = HRTIMER_MODE_REL;
+ hrtimer_setup_sleeper_on_stack(&timer, CLOCK_MONOTONIC, mode);
+ hrtimer_set_expires(&timer.timer, kt);
+ set_current_state(TASK_INTERRUPTIBLE);
+ hrtimer_sleeper_start_expires(&timer, mode);
+
+ if (timer.task)
+ io_schedule();
+
+ hrtimer_cancel(&timer.timer);
+ __set_current_state(TASK_RUNNING);
+ destroy_hrtimer_on_stack(&timer.timer);
+ return sleep_time;
+}
+
+static int io_uring_hybrid_poll(struct io_kiocb *req,
+ struct io_comp_batch *iob, unsigned int poll_flags)
+{
+ struct io_ring_ctx *ctx = req->ctx;
+ u64 runtime, sleep_time;
+ int ret;
+
+ sleep_time = io_hybrid_iopoll_delay(ctx, req);
+ ret = io_uring_classic_poll(req, iob, poll_flags);
+ runtime = ktime_get_ns() - req->iopoll_start - sleep_time;
+
+ /*
+ * Use minimum sleep time if we're polling devices with different
+ * latencies. We could get more completions from the faster ones.
+ */
+ if (ctx->hybrid_poll_time > runtime)
+ ctx->hybrid_poll_time = runtime;
+
+ return ret;
+}
+
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
{
struct io_wq_work_node *pos, *start, *prev;
- unsigned int poll_flags = BLK_POLL_NOSLEEP;
+ unsigned int poll_flags = 0;
DEFINE_IO_COMP_BATCH(iob);
int nr_events = 0;
@@ -1012,7 +1329,6 @@ int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
wq_list_for_each(pos, start, &ctx->iopoll_list) {
struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
- struct file *file = req->file;
int ret;
/*
@@ -1023,29 +1339,23 @@ int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
if (READ_ONCE(req->iopoll_completed))
break;
- if (req->opcode == IORING_OP_URING_CMD) {
- struct io_uring_cmd *ioucmd;
-
- ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
- ret = file->f_op->uring_cmd_iopoll(ioucmd, &iob,
- poll_flags);
- } else {
- struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
+ if (ctx->flags & IORING_SETUP_HYBRID_IOPOLL)
+ ret = io_uring_hybrid_poll(req, &iob, poll_flags);
+ else
+ ret = io_uring_classic_poll(req, &iob, poll_flags);
- ret = file->f_op->iopoll(&rw->kiocb, &iob, poll_flags);
- }
if (unlikely(ret < 0))
return ret;
else if (ret)
poll_flags |= BLK_POLL_ONESHOT;
/* iopoll may have completed current req */
- if (!rq_list_empty(iob.req_list) ||
+ if (!rq_list_empty(&iob.req_list) ||
READ_ONCE(req->iopoll_completed))
break;
}
- if (!rq_list_empty(iob.req_list))
+ if (!rq_list_empty(&iob.req_list))
iob.complete(&iob);
else if (!pos)
return 0;
@@ -1058,24 +1368,27 @@ int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
if (!smp_load_acquire(&req->iopoll_completed))
break;
nr_events++;
- if (unlikely(req->flags & REQ_F_CQE_SKIP))
- continue;
-
- req->cqe.flags = io_put_kbuf(req, 0);
- if (unlikely(!__io_fill_cqe_req(ctx, req))) {
- spin_lock(&ctx->completion_lock);
- io_req_cqe_overflow(req);
- spin_unlock(&ctx->completion_lock);
- }
+ req->cqe.flags = io_put_kbuf(req, req->cqe.res, NULL);
+ if (req->opcode != IORING_OP_URING_CMD)
+ io_req_rw_cleanup(req, 0);
}
-
if (unlikely(!nr_events))
return 0;
- io_commit_cqring(ctx);
- io_cqring_ev_posted_iopoll(ctx);
pos = start ? start->next : ctx->iopoll_list.first;
wq_list_cut(&ctx->iopoll_list, prev, start);
- io_free_batch_list(ctx, pos);
+
+ if (WARN_ON_ONCE(!wq_list_empty(&ctx->submit_state.compl_reqs)))
+ return 0;
+ ctx->submit_state.compl_reqs.first = pos;
+ __io_submit_flush_completions(ctx);
return nr_events;
}
+
+void io_rw_cache_free(const void *entry)
+{
+ struct io_async_rw *rw = (struct io_async_rw *) entry;
+
+ io_vec_free(&rw->vec);
+ kfree(rw);
+}
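
The hybrid IOPOLL additions above (io_hybrid_iopoll_delay() and io_uring_hybrid_poll()) let the kernel sleep for roughly half of the shortest completion time observed so far before spinning on the device, with hybrid_poll_time shrinking toward the fastest device on the ring. A minimal userspace sketch of opting into the mode, assuming liburing and an O_DIRECT-capable file; everything not named in this diff, including the function name, is illustrative:

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>
#include <liburing.h>

static int read_hybrid_iopoll(const char *path, void *buf, unsigned int len)
{
	struct io_uring_params p = {
		/* classic IOPOLL plus the hybrid sleep-then-poll behaviour */
		.flags = IORING_SETUP_IOPOLL | IORING_SETUP_HYBRID_IOPOLL,
	};
	struct io_uring ring;
	struct io_uring_cqe *cqe;
	int fd, ret;

	/*
	 * io_rw_init_file() rejects non-O_DIRECT IOPOLL with -EOPNOTSUPP;
	 * buf must also satisfy the device's O_DIRECT alignment rules.
	 */
	fd = open(path, O_RDONLY | O_DIRECT);
	if (fd < 0)
		return -1;
	if (io_uring_queue_init_params(8, &ring, &p)) {
		close(fd);
		return -1;
	}

	io_uring_prep_read(io_uring_get_sqe(&ring), fd, buf, len, 0);
	io_uring_submit(&ring);
	/* waiting drives io_do_iopoll(), which picks the hybrid path */
	ret = io_uring_wait_cqe(&ring, &cqe);
	if (!ret) {
		ret = cqe->res;
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	close(fd);
	return ret;
}
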
diff --git a/io_uring/rw.h b/io_uring/rw.h
index 3b733f4b610a..9bd7fbf70ea9 100644
--- a/io_uring/rw.h
+++ b/io_uring/rw.h
@@ -1,24 +1,52 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/io_uring_types.h>
#include <linux/pagemap.h>
-struct io_rw_state {
- struct iov_iter iter;
- struct iov_iter_state iter_state;
- struct iovec fast_iov[UIO_FASTIOV];
+struct io_meta_state {
+ u32 seed;
+ struct iov_iter_state iter_meta;
};
struct io_async_rw {
- struct io_rw_state s;
- const struct iovec *free_iovec;
+ struct iou_vec vec;
size_t bytes_done;
- struct wait_page_queue wpq;
+
+ struct_group(clear,
+ struct iov_iter iter;
+ struct iov_iter_state iter_state;
+ struct iovec fast_iov;
+ unsigned buf_group;
+
+ /*
+ * wpq is for buffered io, while meta fields are used with
+ * direct io
+ */
+ union {
+ struct wait_page_queue wpq;
+ struct {
+ struct uio_meta meta;
+ struct io_meta_state meta_state;
+ };
+ };
+ );
};
-int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe);
+int io_prep_read_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe);
+int io_prep_write_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe);
+int io_prep_readv_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe);
+int io_prep_writev_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe);
+int io_prep_readv(struct io_kiocb *req, const struct io_uring_sqe *sqe);
+int io_prep_writev(struct io_kiocb *req, const struct io_uring_sqe *sqe);
+int io_prep_read(struct io_kiocb *req, const struct io_uring_sqe *sqe);
+int io_prep_write(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_read(struct io_kiocb *req, unsigned int issue_flags);
-int io_readv_prep_async(struct io_kiocb *req);
int io_write(struct io_kiocb *req, unsigned int issue_flags);
-int io_writev_prep_async(struct io_kiocb *req);
+int io_read_fixed(struct io_kiocb *req, unsigned int issue_flags);
+int io_write_fixed(struct io_kiocb *req, unsigned int issue_flags);
void io_readv_writev_cleanup(struct io_kiocb *req);
void io_rw_fail(struct io_kiocb *req);
+void io_req_rw_complete(struct io_tw_req tw_req, io_tw_token_t tw);
+int io_read_mshot_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
+int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags);
+void io_rw_cache_free(const void *entry);
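
The struct_group(clear, ...) wrapper above names the transient per-issue state (iterator, save state, buffer group, and the wpq/meta union) so it can be wiped in a single memset() when an io_async_rw entry comes back out of the alloc cache, while the allocated iou_vec and bytes_done sit outside the group and are left alone. A sketch of the pattern only; the helper name is illustrative and not part of this diff:

#include <linux/string.h>

/* Reset the transient iterator/metadata state; keep the allocated vec. */
static inline void io_async_rw_reset(struct io_async_rw *rw)
{
	memset(&rw->clear, 0, sizeof(rw->clear));
}
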
diff --git a/io_uring/slist.h b/io_uring/slist.h
index f27601fa4660..7ef747442754 100644
--- a/io_uring/slist.h
+++ b/io_uring/slist.h
@@ -3,6 +3,9 @@
#include <linux/io_uring_types.h>
+#define __wq_list_for_each(pos, head) \
+ for (pos = (head)->first; pos; pos = (pos)->next)
+
#define wq_list_for_each(pos, prv, head) \
for (pos = (head)->first, prv = NULL; pos; prv = pos, pos = (pos)->next)
@@ -27,28 +30,6 @@ static inline void wq_list_add_after(struct io_wq_work_node *node,
list->last = node;
}
-/**
- * wq_list_merge - merge the second list to the first one.
- * @list0: the first list
- * @list1: the second list
- * Return the first node after mergence.
- */
-static inline struct io_wq_work_node *wq_list_merge(struct io_wq_work_list *list0,
- struct io_wq_work_list *list1)
-{
- struct io_wq_work_node *ret;
-
- if (!list0->first) {
- ret = list1->first;
- } else {
- ret = list0->first;
- list0->last->next = list1->first;
- }
- INIT_WQ_LIST(list0);
- INIT_WQ_LIST(list1);
- return ret;
-}
-
static inline void wq_list_add_tail(struct io_wq_work_node *node,
struct io_wq_work_list *list)
{
@@ -86,24 +67,6 @@ static inline void wq_list_cut(struct io_wq_work_list *list,
last->next = NULL;
}
-static inline void __wq_list_splice(struct io_wq_work_list *list,
- struct io_wq_work_node *to)
-{
- list->last->next = to->next;
- to->next = list->first;
- INIT_WQ_LIST(list);
-}
-
-static inline bool wq_list_splice(struct io_wq_work_list *list,
- struct io_wq_work_node *to)
-{
- if (!wq_list_empty(list)) {
- __wq_list_splice(list, to);
- return true;
- }
- return false;
-}
-
static inline void wq_stack_add_head(struct io_wq_work_node *node,
struct io_wq_work_node *stack)
{
@@ -135,4 +98,4 @@ static inline struct io_wq_work *wq_next_work(struct io_wq_work *work)
return container_of(work->list.next, struct io_wq_work, list);
}
-#endif // INTERNAL_IO_SLIST_H \ No newline at end of file
+#endif // INTERNAL_IO_SLIST_H
diff --git a/io_uring/splice.c b/io_uring/splice.c
index 53e4232d0866..e81ebbb91925 100644
--- a/io_uring/splice.c
+++ b/io_uring/splice.c
@@ -11,6 +11,7 @@
#include <uapi/linux/io_uring.h>
+#include "filetable.h"
#include "io_uring.h"
#include "splice.h"
@@ -21,6 +22,7 @@ struct io_splice {
u64 len;
int splice_fd_in;
unsigned int flags;
+ struct io_rsrc_node *rsrc_node;
};
static int __io_splice_prep(struct io_kiocb *req,
@@ -34,6 +36,8 @@ static int __io_splice_prep(struct io_kiocb *req,
if (unlikely(sp->flags & ~valid_flags))
return -EINVAL;
sp->splice_fd_in = READ_ONCE(sqe->splice_fd_in);
+ sp->rsrc_node = NULL;
+ req->flags |= REQ_F_FORCE_ASYNC;
return 0;
}
@@ -44,21 +48,48 @@ int io_tee_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return __io_splice_prep(req, sqe);
}
+void io_splice_cleanup(struct io_kiocb *req)
+{
+ struct io_splice *sp = io_kiocb_to_cmd(req, struct io_splice);
+
+ if (sp->rsrc_node)
+ io_put_rsrc_node(req->ctx, sp->rsrc_node);
+}
+
+static struct file *io_splice_get_file(struct io_kiocb *req,
+ unsigned int issue_flags)
+{
+ struct io_splice *sp = io_kiocb_to_cmd(req, struct io_splice);
+ struct io_ring_ctx *ctx = req->ctx;
+ struct io_rsrc_node *node;
+ struct file *file = NULL;
+
+ if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
+ return io_file_get_normal(req, sp->splice_fd_in);
+
+ io_ring_submit_lock(ctx, issue_flags);
+ node = io_rsrc_node_lookup(&ctx->file_table.data, sp->splice_fd_in);
+ if (node) {
+ node->refs++;
+ sp->rsrc_node = node;
+ file = io_slot_file(node);
+ req->flags |= REQ_F_NEED_CLEANUP;
+ }
+ io_ring_submit_unlock(ctx, issue_flags);
+ return file;
+}
+
int io_tee(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_splice *sp = io_kiocb_to_cmd(req, struct io_splice);
struct file *out = sp->file_out;
unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
struct file *in;
- long ret = 0;
+ ssize_t ret = 0;
- if (issue_flags & IO_URING_F_NONBLOCK)
- return -EAGAIN;
+ WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
- if (sp->flags & SPLICE_F_FD_IN_FIXED)
- in = io_file_get_fixed(req, sp->splice_fd_in, issue_flags);
- else
- in = io_file_get_normal(req, sp->splice_fd_in);
+ in = io_splice_get_file(req, issue_flags);
if (!in) {
ret = -EBADF;
goto done;
@@ -68,12 +99,12 @@ int io_tee(struct io_kiocb *req, unsigned int issue_flags)
ret = do_tee(in, out, sp->len, flags);
if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
- io_put_file(in);
+ fput(in);
done:
if (ret != sp->len)
req_set_fail(req);
io_req_set_res(req, ret, 0);
- return IOU_OK;
+ return IOU_COMPLETE;
}
int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
@@ -92,15 +123,11 @@ int io_splice(struct io_kiocb *req, unsigned int issue_flags)
unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
loff_t *poff_in, *poff_out;
struct file *in;
- long ret = 0;
+ ssize_t ret = 0;
- if (issue_flags & IO_URING_F_NONBLOCK)
- return -EAGAIN;
+ WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
- if (sp->flags & SPLICE_F_FD_IN_FIXED)
- in = io_file_get_fixed(req, sp->splice_fd_in, issue_flags);
- else
- in = io_file_get_normal(req, sp->splice_fd_in);
+ in = io_splice_get_file(req, issue_flags);
if (!in) {
ret = -EBADF;
goto done;
@@ -113,10 +140,10 @@ int io_splice(struct io_kiocb *req, unsigned int issue_flags)
ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);
if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
- io_put_file(in);
+ fput(in);
done:
if (ret != sp->len)
req_set_fail(req);
io_req_set_res(req, ret, 0);
- return IOU_OK;
+ return IOU_COMPLETE;
}
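
With io_splice_get_file() above, the input side of a splice or tee can resolve through the fixed-file table (taking a reference on the rsrc node) instead of a normal fd lookup. A hedged userspace sketch, assuming liburing and a file already registered at index in_slot; the names and length are illustrative:

#include <errno.h>
#include <liburing.h>

/* Splice 'len' bytes from registered file slot 'in_slot' into a pipe. */
static int splice_from_fixed(struct io_uring *ring, int in_slot,
			     int pipe_wr_fd, unsigned int len)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int ret;

	if (!sqe)
		return -EBUSY;
	io_uring_prep_splice(sqe, in_slot, -1, pipe_wr_fd, -1, len,
			     SPLICE_F_FD_IN_FIXED);
	io_uring_submit(ring);
	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret)
		return ret;
	ret = cqe->res;
	io_uring_cqe_seen(ring, cqe);
	return ret;
}
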
diff --git a/io_uring/splice.h b/io_uring/splice.h
index 542f94168ad3..b9b2848327fb 100644
--- a/io_uring/splice.h
+++ b/io_uring/splice.h
@@ -3,5 +3,6 @@
int io_tee_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_tee(struct io_kiocb *req, unsigned int issue_flags);
+void io_splice_cleanup(struct io_kiocb *req);
int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_splice(struct io_kiocb *req, unsigned int issue_flags);
diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c
index 559652380672..74c1a130cd87 100644
--- a/io_uring/sqpoll.c
+++ b/io_uring/sqpoll.c
@@ -10,14 +10,20 @@
#include <linux/slab.h>
#include <linux/audit.h>
#include <linux/security.h>
+#include <linux/cpuset.h>
+#include <linux/sched/cputime.h>
#include <linux/io_uring.h>
#include <uapi/linux/io_uring.h>
#include "io_uring.h"
+#include "tctx.h"
+#include "napi.h"
+#include "cancel.h"
#include "sqpoll.h"
#define IORING_SQPOLL_CAP_ENTRIES_VALUE 8
+#define IORING_TW_CAP_ENTRIES_VALUE 32
enum {
IO_SQ_THREAD_SHOULD_STOP = 0,
@@ -27,7 +33,7 @@ enum {
void io_sq_thread_unpark(struct io_sq_data *sqd)
__releases(&sqd->lock)
{
- WARN_ON_ONCE(sqd->thread == current);
+ WARN_ON_ONCE(sqpoll_task_locked(sqd) == current);
/*
* Do the dance but not conditional clear_bit() because it'd race with
@@ -37,29 +43,38 @@ void io_sq_thread_unpark(struct io_sq_data *sqd)
if (atomic_dec_return(&sqd->park_pending))
set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
mutex_unlock(&sqd->lock);
+ wake_up(&sqd->wait);
}
void io_sq_thread_park(struct io_sq_data *sqd)
__acquires(&sqd->lock)
{
- WARN_ON_ONCE(sqd->thread == current);
+ struct task_struct *tsk;
atomic_inc(&sqd->park_pending);
set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
mutex_lock(&sqd->lock);
- if (sqd->thread)
- wake_up_process(sqd->thread);
+
+ tsk = sqpoll_task_locked(sqd);
+ if (tsk) {
+ WARN_ON_ONCE(tsk == current);
+ wake_up_process(tsk);
+ }
}
void io_sq_thread_stop(struct io_sq_data *sqd)
{
- WARN_ON_ONCE(sqd->thread == current);
+ struct task_struct *tsk;
+
WARN_ON_ONCE(test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state));
set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
mutex_lock(&sqd->lock);
- if (sqd->thread)
- wake_up_process(sqd->thread);
+ tsk = sqpoll_task_locked(sqd);
+ if (tsk) {
+ WARN_ON_ONCE(tsk == current);
+ wake_up_process(tsk);
+ }
mutex_unlock(&sqd->lock);
wait_for_completion(&sqd->exited);
}
@@ -103,29 +118,21 @@ static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p)
{
struct io_ring_ctx *ctx_attach;
struct io_sq_data *sqd;
- struct fd f;
+ CLASS(fd, f)(p->wq_fd);
- f = fdget(p->wq_fd);
- if (!f.file)
+ if (fd_empty(f))
return ERR_PTR(-ENXIO);
- if (!io_is_uring_fops(f.file)) {
- fdput(f);
+ if (!io_is_uring_fops(fd_file(f)))
return ERR_PTR(-EINVAL);
- }
- ctx_attach = f.file->private_data;
+ ctx_attach = fd_file(f)->private_data;
sqd = ctx_attach->sq_data;
- if (!sqd) {
- fdput(f);
+ if (!sqd)
return ERR_PTR(-EINVAL);
- }
- if (sqd->task_tgid != current->tgid) {
- fdput(f);
+ if (sqd->task_tgid != current->tgid)
return ERR_PTR(-EPERM);
- }
refcount_inc(&sqd->refs);
- fdput(f);
return sqd;
}
@@ -164,7 +171,38 @@ static inline bool io_sqd_events_pending(struct io_sq_data *sqd)
return READ_ONCE(sqd->state);
}
-static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
+struct io_sq_time {
+ bool started;
+ u64 usec;
+};
+
+u64 io_sq_cpu_usec(struct task_struct *tsk)
+{
+ u64 utime, stime;
+
+ task_cputime_adjusted(tsk, &utime, &stime);
+ do_div(stime, 1000);
+ return stime;
+}
+
+static void io_sq_update_worktime(struct io_sq_data *sqd, struct io_sq_time *ist)
+{
+ if (!ist->started)
+ return;
+ ist->started = false;
+ sqd->work_time += io_sq_cpu_usec(current) - ist->usec;
+}
+
+static void io_sq_start_worktime(struct io_sq_time *ist)
+{
+ if (ist->started)
+ return;
+ ist->started = true;
+ ist->usec = io_sq_cpu_usec(current);
+}
+
+static int __io_sq_thread(struct io_ring_ctx *ctx, struct io_sq_data *sqd,
+ bool cap_entries, struct io_sq_time *ist)
{
unsigned int to_submit;
int ret = 0;
@@ -174,9 +212,11 @@ static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
if (cap_entries && to_submit > IORING_SQPOLL_CAP_ENTRIES_VALUE)
to_submit = IORING_SQPOLL_CAP_ENTRIES_VALUE;
- if (!wq_list_empty(&ctx->iopoll_list) || to_submit) {
+ if (to_submit || !wq_list_empty(&ctx->iopoll_list)) {
const struct cred *creds = NULL;
+ io_sq_start_worktime(ist);
+
if (ctx->sq_creds != current_cred())
creds = override_creds(ctx->sq_creds);
@@ -212,32 +252,87 @@ static bool io_sqd_handle_event(struct io_sq_data *sqd)
mutex_unlock(&sqd->lock);
if (signal_pending(current))
did_sig = get_signal(&ksig);
- cond_resched();
+ wait_event(sqd->wait, !atomic_read(&sqd->park_pending));
mutex_lock(&sqd->lock);
+ sqd->sq_cpu = raw_smp_processor_id();
}
return did_sig || test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
}
+/*
+ * Run task_work, processing the retry_list first. The retry_list holds
+ * entries that we passed on in the previous run, if we had more task_work
+ * than we were asked to process. Newly queued task_work isn't run until the
+ * retry list has been fully processed.
+ */
+static unsigned int io_sq_tw(struct llist_node **retry_list, int max_entries)
+{
+ struct io_uring_task *tctx = current->io_uring;
+ unsigned int count = 0;
+
+ if (*retry_list) {
+ *retry_list = io_handle_tw_list(*retry_list, &count, max_entries);
+ if (count >= max_entries)
+ goto out;
+ max_entries -= count;
+ }
+ *retry_list = tctx_task_work_run(tctx, max_entries, &count);
+out:
+ if (task_work_pending(current))
+ task_work_run();
+ return count;
+}
+
+static bool io_sq_tw_pending(struct llist_node *retry_list)
+{
+ struct io_uring_task *tctx = current->io_uring;
+
+ return retry_list || !llist_empty(&tctx->task_list);
+}
+
static int io_sq_thread(void *data)
{
+ struct llist_node *retry_list = NULL;
struct io_sq_data *sqd = data;
struct io_ring_ctx *ctx;
unsigned long timeout = 0;
- char buf[TASK_COMM_LEN];
+ char buf[TASK_COMM_LEN] = {};
DEFINE_WAIT(wait);
+ /* offload context creation failed, just exit */
+ if (!current->io_uring) {
+ mutex_lock(&sqd->lock);
+ rcu_assign_pointer(sqd->thread, NULL);
+ put_task_struct(current);
+ mutex_unlock(&sqd->lock);
+ goto err_out;
+ }
+
snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
set_task_comm(current, buf);
- if (sqd->sq_cpu != -1)
+ /* reset to our pid after we've set task_comm, for fdinfo */
+ sqd->task_pid = current->pid;
+
+ if (sqd->sq_cpu != -1) {
set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));
- else
+ } else {
set_cpus_allowed_ptr(current, cpu_online_mask);
- current->flags |= PF_NO_SETAFFINITY;
+ sqd->sq_cpu = raw_smp_processor_id();
+ }
+
+ /*
+ * Force audit context to get setup, in case we do prep side async
+ * operations that would trigger an audit call before any issue side
+ * audit has been done.
+ */
+ audit_uring_entry(IORING_OP_NOP);
+ audit_uring_exit(true, 0);
mutex_lock(&sqd->lock);
while (1) {
bool cap_entries, sqt_spin = false;
+ struct io_sq_time ist = { };
if (io_sqd_events_pending(sqd) || signal_pending(current)) {
if (io_sqd_handle_event(sqd))
@@ -247,23 +342,37 @@ static int io_sq_thread(void *data)
cap_entries = !list_is_singular(&sqd->ctx_list);
list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
- int ret = __io_sq_thread(ctx, cap_entries);
+ int ret = __io_sq_thread(ctx, sqd, cap_entries, &ist);
if (!sqt_spin && (ret > 0 || !wq_list_empty(&ctx->iopoll_list)))
sqt_spin = true;
}
- if (io_run_task_work())
+ if (io_sq_tw(&retry_list, IORING_TW_CAP_ENTRIES_VALUE))
sqt_spin = true;
+ list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
+ if (io_napi(ctx)) {
+ io_sq_start_worktime(&ist);
+ io_napi_sqpoll_busy_poll(ctx);
+ }
+ }
+
+ io_sq_update_worktime(sqd, &ist);
+
if (sqt_spin || !time_after(jiffies, timeout)) {
- cond_resched();
if (sqt_spin)
timeout = jiffies + sqd->sq_thread_idle;
+ if (unlikely(need_resched())) {
+ mutex_unlock(&sqd->lock);
+ cond_resched();
+ mutex_lock(&sqd->lock);
+ sqd->sq_cpu = raw_smp_processor_id();
+ }
continue;
}
prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
- if (!io_sqd_events_pending(sqd) && !task_work_pending(current)) {
+ if (!io_sqd_events_pending(sqd) && !io_sq_tw_pending(retry_list)) {
bool needs_sched = true;
list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
@@ -291,6 +400,7 @@ static int io_sq_thread(void *data)
mutex_unlock(&sqd->lock);
schedule();
mutex_lock(&sqd->lock);
+ sqd->sq_cpu = raw_smp_processor_id();
}
list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
atomic_andnot(IORING_SQ_NEED_WAKEUP,
@@ -301,18 +411,22 @@ static int io_sq_thread(void *data)
timeout = jiffies + sqd->sq_thread_idle;
}
+ if (retry_list)
+ io_sq_tw(&retry_list, UINT_MAX);
+
io_uring_cancel_generic(true, sqd);
- sqd->thread = NULL;
+ rcu_assign_pointer(sqd->thread, NULL);
+ put_task_struct(current);
list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
atomic_or(IORING_SQ_NEED_WAKEUP, &ctx->rings->sq_flags);
io_run_task_work();
mutex_unlock(&sqd->lock);
-
+err_out:
complete(&sqd->exited);
do_exit(0);
}
-int io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
+void io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
{
DEFINE_WAIT(wait);
@@ -327,7 +441,6 @@ int io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
} while (!signal_pending(current));
finish_wait(&ctx->sqo_sq_wait, &wait);
- return 0;
}
__cold int io_sq_offload_create(struct io_ring_ctx *ctx,
@@ -338,16 +451,11 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx,
/* Retain compatibility with failing for an invalid attach attempt */
if ((ctx->flags & (IORING_SETUP_ATTACH_WQ | IORING_SETUP_SQPOLL)) ==
IORING_SETUP_ATTACH_WQ) {
- struct fd f;
-
- f = fdget(p->wq_fd);
- if (!f.file)
+ CLASS(fd, f)(p->wq_fd);
+ if (fd_empty(f))
return -ENXIO;
- if (!io_is_uring_fops(f.file)) {
- fdput(f);
+ if (!io_is_uring_fops(fd_file(f)))
return -EINVAL;
- }
- fdput(f);
}
if (ctx->flags & IORING_SETUP_SQPOLL) {
struct task_struct *tsk;
@@ -383,11 +491,22 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx,
return 0;
if (p->flags & IORING_SETUP_SQ_AFF) {
+ cpumask_var_t allowed_mask;
int cpu = p->sq_thread_cpu;
ret = -EINVAL;
if (cpu >= nr_cpu_ids || !cpu_online(cpu))
goto err_sqpoll;
+ ret = -ENOMEM;
+ if (!alloc_cpumask_var(&allowed_mask, GFP_KERNEL))
+ goto err_sqpoll;
+ ret = -EINVAL;
+ cpuset_cpus_allowed(current, allowed_mask);
+ if (!cpumask_test_cpu(cpu, allowed_mask)) {
+ free_cpumask_var(allowed_mask);
+ goto err_sqpoll;
+ }
+ free_cpumask_var(allowed_mask);
sqd->sq_cpu = cpu;
} else {
sqd->sq_cpu = -1;
@@ -401,7 +520,11 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx,
goto err_sqpoll;
}
- sqd->thread = tsk;
+ mutex_lock(&sqd->lock);
+ rcu_assign_pointer(sqd->thread, tsk);
+ mutex_unlock(&sqd->lock);
+
+ get_task_struct(tsk);
ret = io_uring_alloc_task_context(tsk, ctx);
wake_up_new_task(tsk);
if (ret)
@@ -411,7 +534,6 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx,
ret = -EINVAL;
goto err;
}
-
return 0;
err_sqpoll:
complete(&ctx->sq_data->exited);
@@ -419,3 +541,23 @@ err:
io_sq_thread_finish(ctx);
return ret;
}
+
+__cold int io_sqpoll_wq_cpu_affinity(struct io_ring_ctx *ctx,
+ cpumask_var_t mask)
+{
+ struct io_sq_data *sqd = ctx->sq_data;
+ int ret = -EINVAL;
+
+ if (sqd) {
+ struct task_struct *tsk;
+
+ io_sq_thread_park(sqd);
+ /* Don't set affinity for a dying thread */
+ tsk = sqpoll_task_locked(sqd);
+ if (tsk)
+ ret = io_wq_cpu_affinity(tsk->io_uring, mask);
+ io_sq_thread_unpark(sqd);
+ }
+
+ return ret;
+}
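
The SQ_AFF path above now checks the requested CPU against the task's cpuset in addition to cpu_online(), and the SQPOLL thread accounts its busy time via io_sq_cpu_usec(). A minimal sketch of the userspace side, assuming liburing; the CPU number and idle time are arbitrary:

#include <liburing.h>

static int setup_sqpoll_pinned(struct io_uring *ring)
{
	struct io_uring_params p = {
		.flags = IORING_SETUP_SQPOLL | IORING_SETUP_SQ_AFF,
		.sq_thread_cpu = 2,	/* must be online and inside the cpuset */
		.sq_thread_idle = 1000,	/* ms before the SQPOLL thread idles */
	};

	return io_uring_queue_init_params(64, ring, &p);
}
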
diff --git a/io_uring/sqpoll.h b/io_uring/sqpoll.h
index 0c3fbcd1f583..fd2f6f29b516 100644
--- a/io_uring/sqpoll.h
+++ b/io_uring/sqpoll.h
@@ -8,7 +8,7 @@ struct io_sq_data {
/* ctx's that are using this sqd */
struct list_head ctx_list;
- struct task_struct *thread;
+ struct task_struct __rcu *thread;
struct wait_queue_head wait;
unsigned sq_thread_idle;
@@ -16,6 +16,7 @@ struct io_sq_data {
pid_t task_pid;
pid_t task_tgid;
+ u64 work_time;
unsigned long state;
struct completion exited;
};
@@ -26,4 +27,12 @@ void io_sq_thread_stop(struct io_sq_data *sqd);
void io_sq_thread_park(struct io_sq_data *sqd);
void io_sq_thread_unpark(struct io_sq_data *sqd);
void io_put_sq_data(struct io_sq_data *sqd);
-int io_sqpoll_wait_sq(struct io_ring_ctx *ctx);
+void io_sqpoll_wait_sq(struct io_ring_ctx *ctx);
+int io_sqpoll_wq_cpu_affinity(struct io_ring_ctx *ctx, cpumask_var_t mask);
+u64 io_sq_cpu_usec(struct task_struct *tsk);
+
+static inline struct task_struct *sqpoll_task_locked(struct io_sq_data *sqd)
+{
+ return rcu_dereference_protected(sqd->thread,
+ lockdep_is_held(&sqd->lock));
+}
diff --git a/io_uring/statx.c b/io_uring/statx.c
index d8fc933d3f59..5111e9befbfe 100644
--- a/io_uring/statx.c
+++ b/io_uring/statx.c
@@ -36,9 +36,7 @@ int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
sx->buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
sx->flags = READ_ONCE(sqe->statx_flags);
- sx->filename = getname_flags(path,
- getname_statx_lookup_flags(sx->flags),
- NULL);
+ sx->filename = getname_uflags(path, sx->flags);
if (IS_ERR(sx->filename)) {
int ret = PTR_ERR(sx->filename);
@@ -48,6 +46,7 @@ int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
}
req->flags |= REQ_F_NEED_CLEANUP;
+ req->flags |= REQ_F_FORCE_ASYNC;
return 0;
}
@@ -56,12 +55,11 @@ int io_statx(struct io_kiocb *req, unsigned int issue_flags)
struct io_statx *sx = io_kiocb_to_cmd(req, struct io_statx);
int ret;
- if (issue_flags & IO_URING_F_NONBLOCK)
- return -EAGAIN;
+ WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
ret = do_statx(sx->dfd, sx->filename, sx->flags, sx->mask, sx->buffer);
io_req_set_res(req, ret, 0);
- return IOU_OK;
+ return IOU_COMPLETE;
}
void io_statx_cleanup(struct io_kiocb *req)
diff --git a/io_uring/sync.c b/io_uring/sync.c
index 64e87ea2b8fb..cea2d381ffd2 100644
--- a/io_uring/sync.c
+++ b/io_uring/sync.c
@@ -32,6 +32,8 @@ int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
sync->off = READ_ONCE(sqe->off);
sync->len = READ_ONCE(sqe->len);
sync->flags = READ_ONCE(sqe->sync_range_flags);
+ req->flags |= REQ_F_FORCE_ASYNC;
+
return 0;
}
@@ -41,12 +43,11 @@ int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
int ret;
/* sync_file_range always requires a blocking context */
- if (issue_flags & IO_URING_F_NONBLOCK)
- return -EAGAIN;
+ WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
ret = sync_file_range(req->file, sync->off, sync->len, sync->flags);
io_req_set_res(req, ret, 0);
- return IOU_OK;
+ return IOU_COMPLETE;
}
int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
@@ -62,6 +63,7 @@ int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
sync->off = READ_ONCE(sqe->off);
sync->len = READ_ONCE(sqe->len);
+ req->flags |= REQ_F_FORCE_ASYNC;
return 0;
}
@@ -72,13 +74,12 @@ int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
int ret;
/* fsync always requires a blocking context */
- if (issue_flags & IO_URING_F_NONBLOCK)
- return -EAGAIN;
+ WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
ret = vfs_fsync_range(req->file, sync->off, end > 0 ? end : LLONG_MAX,
sync->flags & IORING_FSYNC_DATASYNC);
io_req_set_res(req, ret, 0);
- return IOU_OK;
+ return IOU_COMPLETE;
}
int io_fallocate_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
@@ -91,6 +92,7 @@ int io_fallocate_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
sync->off = READ_ONCE(sqe->off);
sync->len = READ_ONCE(sqe->addr);
sync->mode = READ_ONCE(sqe->len);
+ req->flags |= REQ_F_FORCE_ASYNC;
return 0;
}
@@ -100,11 +102,11 @@ int io_fallocate(struct io_kiocb *req, unsigned int issue_flags)
int ret;
/* fallocate always requiring blocking context */
- if (issue_flags & IO_URING_F_NONBLOCK)
- return -EAGAIN;
+ WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
+
ret = vfs_fallocate(req->file, sync->mode, sync->off, sync->len);
if (ret >= 0)
fsnotify_modify(req->file);
io_req_set_res(req, ret, 0);
- return IOU_OK;
+ return IOU_COMPLETE;
}
diff --git a/io_uring/tctx.c b/io_uring/tctx.c
index 4324b1cf1f6a..5b66755579c0 100644
--- a/io_uring/tctx.c
+++ b/io_uring/tctx.c
@@ -35,8 +35,6 @@ static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
data.hash = hash;
data.task = task;
- data.free_work = io_wq_free_work;
- data.do_work = io_wq_submit_work;
/* Do QD, or 4 * CPUS, whatever is smallest */
concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
@@ -47,8 +45,19 @@ static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
void __io_uring_free(struct task_struct *tsk)
{
struct io_uring_task *tctx = tsk->io_uring;
+ struct io_tctx_node *node;
+ unsigned long index;
- WARN_ON_ONCE(!xa_empty(&tctx->xa));
+ /*
+ * Fault injection forcing allocation errors in the xa_store() path
+ * can lead to xa_empty() returning false, even though no actual
+ * node is stored in the xarray. Until that gets sorted out, attempt
+ * an iteration here and warn if any entries are found.
+ */
+ xa_for_each(&tctx->xa, index, node) {
+ WARN_ON_ONCE(1);
+ break;
+ }
WARN_ON_ONCE(tctx->io_wq);
WARN_ON_ONCE(tctx->cached_refs);
@@ -81,9 +90,10 @@ __cold int io_uring_alloc_task_context(struct task_struct *task,
return ret;
}
+ tctx->task = task;
xa_init(&tctx->xa);
init_waitqueue_head(&tctx->wait);
- atomic_set(&tctx->in_idle, 0);
+ atomic_set(&tctx->in_cancel, 0);
atomic_set(&tctx->inflight_tracked, 0);
task->io_uring = tctx;
init_llist_head(&tctx->task_list);
@@ -208,31 +218,40 @@ void io_uring_unreg_ringfd(void)
}
}
-static int io_ring_add_registered_fd(struct io_uring_task *tctx, int fd,
+int io_ring_add_registered_file(struct io_uring_task *tctx, struct file *file,
int start, int end)
{
- struct file *file;
int offset;
-
for (offset = start; offset < end; offset++) {
offset = array_index_nospec(offset, IO_RINGFD_REG_MAX);
if (tctx->registered_rings[offset])
continue;
- file = fget(fd);
- if (!file) {
- return -EBADF;
- } else if (!io_is_uring_fops(file)) {
- fput(file);
- return -EOPNOTSUPP;
- }
tctx->registered_rings[offset] = file;
return offset;
}
-
return -EBUSY;
}
+static int io_ring_add_registered_fd(struct io_uring_task *tctx, int fd,
+ int start, int end)
+{
+ struct file *file;
+ int offset;
+
+ file = fget(fd);
+ if (!file) {
+ return -EBADF;
+ } else if (!io_is_uring_fops(file)) {
+ fput(file);
+ return -EOPNOTSUPP;
+ }
+ offset = io_ring_add_registered_file(tctx, file, start, end);
+ if (offset < 0)
+ fput(file);
+ return offset;
+}
+
/*
* Register a ring fd to avoid fdget/fdput for each io_uring_enter()
* invocation. User passes in an array of struct io_uring_rsrc_update
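
For the registered-ring-fd path being reworked above, liburing exposes a helper that registers the ring's own fd so later io_uring_enter() calls use the registered index and skip the per-call fdget/fdput. A brief sketch, assuming liburing:

#include <liburing.h>

static int use_registered_ring_fd(struct io_uring *ring)
{
	/* returns 1 (the number of fds registered) on success, -errno on failure */
	int ret = io_uring_register_ring_fd(ring);

	return ret == 1 ? 0 : ret;
}
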
diff --git a/io_uring/timeout.c b/io_uring/timeout.c
index 826a51bca3e4..d8fbbaf31cf3 100644
--- a/io_uring/timeout.c
+++ b/io_uring/timeout.c
@@ -17,6 +17,7 @@ struct io_timeout {
struct file *file;
u32 off;
u32 target_seq;
+ u32 repeats;
struct list_head list;
/* head of the link, used by linked timeouts only */
struct io_kiocb *head;
@@ -34,11 +35,15 @@ struct io_timeout_rem {
bool ltimeout;
};
+static struct io_kiocb *__io_disarm_linked_timeout(struct io_kiocb *req,
+ struct io_kiocb *link);
+
static inline bool io_is_timeout_noseq(struct io_kiocb *req)
{
struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
+ struct io_timeout_data *data = req->async_data;
- return !timeout->off;
+ return !timeout->off || data->flags & IORING_TIMEOUT_MULTISHOT;
}
static inline void io_put_req(struct io_kiocb *req)
@@ -49,7 +54,62 @@ static inline void io_put_req(struct io_kiocb *req)
}
}
-static bool io_kill_timeout(struct io_kiocb *req, int status)
+static inline bool io_timeout_finish(struct io_timeout *timeout,
+ struct io_timeout_data *data)
+{
+ if (!(data->flags & IORING_TIMEOUT_MULTISHOT))
+ return true;
+
+ if (!timeout->off || (timeout->repeats && --timeout->repeats))
+ return false;
+
+ return true;
+}
+
+static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer);
+
+static void io_timeout_complete(struct io_tw_req tw_req, io_tw_token_t tw)
+{
+ struct io_kiocb *req = tw_req.req;
+ struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
+ struct io_timeout_data *data = req->async_data;
+ struct io_ring_ctx *ctx = req->ctx;
+
+ if (!io_timeout_finish(timeout, data)) {
+ if (io_req_post_cqe(req, -ETIME, IORING_CQE_F_MORE)) {
+ /* re-arm timer */
+ raw_spin_lock_irq(&ctx->timeout_lock);
+ list_add(&timeout->list, ctx->timeout_list.prev);
+ hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
+ raw_spin_unlock_irq(&ctx->timeout_lock);
+ return;
+ }
+ }
+
+ io_req_task_complete(tw_req, tw);
+}
+
+static __cold bool io_flush_killed_timeouts(struct list_head *list, int err)
+{
+ if (list_empty(list))
+ return false;
+
+ while (!list_empty(list)) {
+ struct io_timeout *timeout;
+ struct io_kiocb *req;
+
+ timeout = list_first_entry(list, struct io_timeout, list);
+ list_del_init(&timeout->list);
+ req = cmd_to_io_kiocb(timeout);
+ if (err)
+ req_set_fail(req);
+ io_req_queue_tw_complete(req, err);
+ }
+
+ return true;
+}
+
+static void io_kill_timeout(struct io_kiocb *req, struct list_head *list)
__must_hold(&req->ctx->timeout_lock)
{
struct io_timeout_data *io = req->async_data;
@@ -57,23 +117,19 @@ static bool io_kill_timeout(struct io_kiocb *req, int status)
if (hrtimer_try_to_cancel(&io->timer) != -1) {
struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
- if (status)
- req_set_fail(req);
atomic_set(&req->ctx->cq_timeouts,
atomic_read(&req->ctx->cq_timeouts) + 1);
- list_del_init(&timeout->list);
- io_req_queue_tw_complete(req, status);
- return true;
+ list_move_tail(&timeout->list, list);
}
- return false;
}
__cold void io_flush_timeouts(struct io_ring_ctx *ctx)
{
- u32 seq;
struct io_timeout *timeout, *tmp;
+ LIST_HEAD(list);
+ u32 seq;
- spin_lock_irq(&ctx->timeout_lock);
+ raw_spin_lock_irq(&ctx->timeout_lock);
seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
@@ -95,15 +151,18 @@ __cold void io_flush_timeouts(struct io_ring_ctx *ctx)
if (events_got < events_needed)
break;
- io_kill_timeout(req, 0);
+ io_kill_timeout(req, &list);
}
ctx->cq_last_tm_flush = seq;
- spin_unlock_irq(&ctx->timeout_lock);
+ raw_spin_unlock_irq(&ctx->timeout_lock);
+ io_flush_killed_timeouts(&list, 0);
}
-static void io_req_tw_fail_links(struct io_kiocb *link, bool *locked)
+static void io_req_tw_fail_links(struct io_tw_req tw_req, io_tw_token_t tw)
{
- io_tw_lock(link->ctx, locked);
+ struct io_kiocb *link = tw_req.req;
+
+ io_tw_lock(link->ctx, tw);
while (link) {
struct io_kiocb *nxt = link->link;
long res = -ECANCELED;
@@ -112,7 +171,7 @@ static void io_req_tw_fail_links(struct io_kiocb *link, bool *locked)
res = link->cqe.res;
link->link = NULL;
io_req_set_res(link, res, 0);
- io_req_task_complete(link, locked);
+ io_req_task_complete((struct io_tw_req){link}, tw);
link = nxt;
}
}
@@ -164,9 +223,11 @@ void io_disarm_next(struct io_kiocb *req)
} else if (req->flags & REQ_F_LINK_TIMEOUT) {
struct io_ring_ctx *ctx = req->ctx;
- spin_lock_irq(&ctx->timeout_lock);
- link = io_disarm_linked_timeout(req);
- spin_unlock_irq(&ctx->timeout_lock);
+ raw_spin_lock_irq(&ctx->timeout_lock);
+ if (req->link && req->link->opcode == IORING_OP_LINK_TIMEOUT)
+ link = __io_disarm_linked_timeout(req, req->link);
+
+ raw_spin_unlock_irq(&ctx->timeout_lock);
if (link)
io_req_queue_tw_complete(link, -ECANCELED);
}
@@ -175,8 +236,8 @@ void io_disarm_next(struct io_kiocb *req)
io_fail_links(req);
}
-struct io_kiocb *__io_disarm_linked_timeout(struct io_kiocb *req,
- struct io_kiocb *link)
+static struct io_kiocb *__io_disarm_linked_timeout(struct io_kiocb *req,
+ struct io_kiocb *link)
__must_hold(&req->ctx->completion_lock)
__must_hold(&req->ctx->timeout_lock)
{
@@ -202,17 +263,17 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
struct io_ring_ctx *ctx = req->ctx;
unsigned long flags;
- spin_lock_irqsave(&ctx->timeout_lock, flags);
+ raw_spin_lock_irqsave(&ctx->timeout_lock, flags);
list_del_init(&timeout->list);
atomic_set(&req->ctx->cq_timeouts,
atomic_read(&req->ctx->cq_timeouts) + 1);
- spin_unlock_irqrestore(&ctx->timeout_lock, flags);
+ raw_spin_unlock_irqrestore(&ctx->timeout_lock, flags);
if (!(data->flags & IORING_TIMEOUT_ETIME_SUCCESS))
req_set_fail(req);
io_req_set_res(req, -ETIME, 0);
- req->io_task_work.func = io_req_task_complete;
+ req->io_task_work.func = io_timeout_complete;
io_req_task_work_add(req);
return HRTIMER_NORESTART;
}
@@ -228,16 +289,10 @@ static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
list_for_each_entry(timeout, &ctx->timeout_list, list) {
struct io_kiocb *tmp = cmd_to_io_kiocb(timeout);
- if (!(cd->flags & IORING_ASYNC_CANCEL_ANY) &&
- cd->data != tmp->cqe.user_data)
- continue;
- if (cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY)) {
- if (cd->seq == tmp->work.cancel_seq)
- continue;
- tmp->work.cancel_seq = cd->seq;
+ if (io_cancel_req_match(tmp, cd)) {
+ req = tmp;
+ break;
}
- req = tmp;
- break;
}
if (!req)
return ERR_PTR(-ENOENT);
@@ -255,9 +310,9 @@ int io_timeout_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
{
struct io_kiocb *req;
- spin_lock_irq(&ctx->timeout_lock);
+ raw_spin_lock_irq(&ctx->timeout_lock);
req = io_timeout_extract(ctx, cd);
- spin_unlock_irq(&ctx->timeout_lock);
+ raw_spin_unlock_irq(&ctx->timeout_lock);
if (IS_ERR(req))
return PTR_ERR(req);
@@ -265,28 +320,30 @@ int io_timeout_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
return 0;
}
-static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
+static void io_req_task_link_timeout(struct io_tw_req tw_req, io_tw_token_t tw)
{
- unsigned issue_flags = *locked ? 0 : IO_URING_F_UNLOCKED;
+ struct io_kiocb *req = tw_req.req;
struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
struct io_kiocb *prev = timeout->prev;
- int ret = -ENOENT;
+ int ret;
if (prev) {
- if (!(req->task->flags & PF_EXITING)) {
+ if (!tw.cancel) {
struct io_cancel_data cd = {
.ctx = req->ctx,
.data = prev->cqe.user_data,
};
- ret = io_try_cancel(req->task->io_uring, &cd, issue_flags);
+ ret = io_try_cancel(req->tctx, &cd, 0);
+ } else {
+ ret = -ECANCELED;
}
io_req_set_res(req, ret ?: -ETIME, 0);
- io_req_task_complete(req, locked);
+ io_req_task_complete(tw_req, tw);
io_put_req(prev);
} else {
io_req_set_res(req, -ETIME, 0);
- io_req_task_complete(req, locked);
+ io_req_task_complete(tw_req, tw);
}
}
@@ -299,7 +356,7 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
struct io_ring_ctx *ctx = req->ctx;
unsigned long flags;
- spin_lock_irqsave(&ctx->timeout_lock, flags);
+ raw_spin_lock_irqsave(&ctx->timeout_lock, flags);
prev = timeout->head;
timeout->head = NULL;
@@ -314,7 +371,7 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
}
list_del(&timeout->list);
timeout->prev = prev;
- spin_unlock_irqrestore(&ctx->timeout_lock, flags);
+ raw_spin_unlock_irqrestore(&ctx->timeout_lock, flags);
req->io_task_work.func = io_req_task_link_timeout;
io_req_task_work_add(req);
@@ -359,8 +416,7 @@ static int io_linked_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
io = req->async_data;
if (hrtimer_try_to_cancel(&io->timer) == -1)
return -EALREADY;
- hrtimer_init(&io->timer, io_timeout_get_clock(io), mode);
- io->timer.function = io_link_timeout_fn;
+ hrtimer_setup(&io->timer, io_link_timeout_fn, io_timeout_get_clock(io), mode);
hrtimer_start(&io->timer, timespec64_to_ktime(*ts), mode);
return 0;
}
@@ -369,7 +425,7 @@ static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
struct timespec64 *ts, enum hrtimer_mode mode)
__must_hold(&ctx->timeout_lock)
{
- struct io_cancel_data cd = { .data = user_data, };
+ struct io_cancel_data cd = { .ctx = ctx, .data = user_data, };
struct io_kiocb *req = io_timeout_extract(ctx, &cd);
struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
struct io_timeout_data *data;
@@ -379,10 +435,11 @@ static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
timeout->off = 0; /* noseq */
data = req->async_data;
+ data->ts = *ts;
+
list_add_tail(&timeout->list, &ctx->timeout_list);
- hrtimer_init(&data->timer, io_timeout_get_clock(data), mode);
- data->timer.function = io_timeout_fn;
- hrtimer_start(&data->timer, timespec64_to_ktime(*ts), mode);
+ hrtimer_setup(&data->timer, io_timeout_fn, io_timeout_get_clock(data), mode);
+ hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), mode);
return 0;
}
@@ -433,7 +490,7 @@ int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
int ret;
if (!(tr->flags & IORING_TIMEOUT_UPDATE)) {
- struct io_cancel_data cd = { .data = tr->addr, };
+ struct io_cancel_data cd = { .ctx = ctx, .data = tr->addr, };
spin_lock(&ctx->completion_lock);
ret = io_timeout_cancel(ctx, &cd);
@@ -441,18 +498,18 @@ int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
} else {
enum hrtimer_mode mode = io_translate_timeout_mode(tr->flags);
- spin_lock_irq(&ctx->timeout_lock);
+ raw_spin_lock_irq(&ctx->timeout_lock);
if (tr->ltimeout)
ret = io_linked_timeout_update(ctx, tr->addr, &tr->ts, mode);
else
ret = io_timeout_update(ctx, tr->addr, &tr->ts, mode);
- spin_unlock_irq(&ctx->timeout_lock);
+ raw_spin_unlock_irq(&ctx->timeout_lock);
}
if (ret < 0)
req_set_fail(req);
io_req_set_res(req, ret, 0);
- return IOU_OK;
+ return IOU_COMPLETE;
}
static int __io_timeout_prep(struct io_kiocb *req,
@@ -470,23 +527,33 @@ static int __io_timeout_prep(struct io_kiocb *req,
return -EINVAL;
flags = READ_ONCE(sqe->timeout_flags);
if (flags & ~(IORING_TIMEOUT_ABS | IORING_TIMEOUT_CLOCK_MASK |
- IORING_TIMEOUT_ETIME_SUCCESS))
+ IORING_TIMEOUT_ETIME_SUCCESS |
+ IORING_TIMEOUT_MULTISHOT))
return -EINVAL;
/* more than one clock specified is invalid, obviously */
if (hweight32(flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
return -EINVAL;
+ /* multishot requests only make sense with rel values */
+ if (!(~flags & (IORING_TIMEOUT_MULTISHOT | IORING_TIMEOUT_ABS)))
+ return -EINVAL;
INIT_LIST_HEAD(&timeout->list);
timeout->off = off;
if (unlikely(off && !req->ctx->off_timeout_used))
req->ctx->off_timeout_used = true;
+ /*
+ * for multishot reqs w/ fixed nr of repeats, repeats tracks the
+ * remaining nr
+ */
+ timeout->repeats = 0;
+ if ((flags & IORING_TIMEOUT_MULTISHOT) && off > 0)
+ timeout->repeats = off;
if (WARN_ON_ONCE(req_has_async_data(req)))
return -EFAULT;
- if (io_alloc_async_data(req))
+ data = io_uring_alloc_async_data(NULL, req);
+ if (!data)
return -ENOMEM;
-
- data = req->async_data;
data->req = req;
data->flags = flags;
@@ -496,9 +563,7 @@ static int __io_timeout_prep(struct io_kiocb *req,
if (data->ts.tv_sec < 0 || data->ts.tv_nsec < 0)
return -EINVAL;
- INIT_LIST_HEAD(&timeout->list);
data->mode = io_translate_timeout_mode(flags);
- hrtimer_init(&data->timer, io_timeout_get_clock(data), data->mode);
if (is_timeout_link) {
struct io_submit_link *link = &req->ctx->submit_state.link;
@@ -509,6 +574,10 @@ static int __io_timeout_prep(struct io_kiocb *req,
return -EINVAL;
timeout->head = link->last;
link->last->flags |= REQ_F_ARM_LTIMEOUT;
+ hrtimer_setup(&data->timer, io_link_timeout_fn, io_timeout_get_clock(data),
+ data->mode);
+ } else {
+ hrtimer_setup(&data->timer, io_timeout_fn, io_timeout_get_clock(data), data->mode);
}
return 0;
}
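A minimal userspace sketch of arming a multishot timeout, assuming the conventional timeout SQE layout handled by __io_timeout_prep() above (timespec pointer in sqe->addr, repeat count in sqe->off); io_uring_get_sqe() is the liburing helper and is an assumption of the example. Note that IORING_TIMEOUT_MULTISHOT cannot be combined with IORING_TIMEOUT_ABS, which is what the !(~flags & (MULTISHOT | ABS)) check enforces.

	struct __kernel_timespec ts = { .tv_sec = 1 };
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);	/* liburing helper, assumed */

	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_TIMEOUT;
	sqe->addr = (unsigned long)&ts;			/* relative 1s period */
	sqe->len = 1;
	sqe->off = 8;					/* fire up to 8 times, then complete */
	sqe->timeout_flags = IORING_TIMEOUT_MULTISHOT;	/* must not include IORING_TIMEOUT_ABS */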
@@ -531,7 +600,7 @@ int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
struct list_head *entry;
u32 tail, off = timeout->off;
- spin_lock_irq(&ctx->timeout_lock);
+ raw_spin_lock_irq(&ctx->timeout_lock);
/*
* sqe->off holds how many events need to occur for this
@@ -543,7 +612,7 @@ int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
goto add;
}
- tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
+ tail = data_race(ctx->cached_cq_tail) - atomic_read(&ctx->cq_timeouts);
timeout->target_seq = tail + off;
/* Update the last seq here in case io_flush_timeouts() hasn't.
@@ -568,9 +637,8 @@ int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
}
add:
list_add(&timeout->list, entry);
- data->timer.function = io_timeout_fn;
hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
- spin_unlock_irq(&ctx->timeout_lock);
+ raw_spin_unlock_irq(&ctx->timeout_lock);
return IOU_ISSUE_SKIP_COMPLETE;
}
@@ -579,7 +647,7 @@ void io_queue_linked_timeout(struct io_kiocb *req)
struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
struct io_ring_ctx *ctx = req->ctx;
- spin_lock_irq(&ctx->timeout_lock);
+ raw_spin_lock_irq(&ctx->timeout_lock);
/*
* If the back reference is NULL, then our linked request finished
* before we got a chance to setup the timer
@@ -587,23 +655,22 @@ void io_queue_linked_timeout(struct io_kiocb *req)
if (timeout->head) {
struct io_timeout_data *data = req->async_data;
- data->timer.function = io_link_timeout_fn;
hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
data->mode);
list_add_tail(&timeout->list, &ctx->ltimeout_list);
}
- spin_unlock_irq(&ctx->timeout_lock);
+ raw_spin_unlock_irq(&ctx->timeout_lock);
/* drop submission reference */
io_put_req(req);
}
-static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
+static bool io_match_task(struct io_kiocb *head, struct io_uring_task *tctx,
bool cancel_all)
- __must_hold(&req->ctx->timeout_lock)
+ __must_hold(&head->ctx->timeout_lock)
{
struct io_kiocb *req;
- if (task && head->task != task)
+ if (tctx && head->tctx != tctx)
return false;
if (cancel_all)
return true;
@@ -616,26 +683,26 @@ static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
}
/* Returns true if we found and killed one or more timeouts */
-__cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
+__cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
bool cancel_all)
{
struct io_timeout *timeout, *tmp;
- int canceled = 0;
+ LIST_HEAD(list);
/*
* completion_lock is needed for io_match_task(). Take it before
* timeout_lock first to keep the locking order.
*/
spin_lock(&ctx->completion_lock);
- spin_lock_irq(&ctx->timeout_lock);
+ raw_spin_lock_irq(&ctx->timeout_lock);
list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
struct io_kiocb *req = cmd_to_io_kiocb(timeout);
- if (io_match_task(req, tsk, cancel_all) &&
- io_kill_timeout(req, -ECANCELED))
- canceled++;
+ if (io_match_task(req, tctx, cancel_all))
+ io_kill_timeout(req, &list);
}
- spin_unlock_irq(&ctx->timeout_lock);
+ raw_spin_unlock_irq(&ctx->timeout_lock);
spin_unlock(&ctx->completion_lock);
- return canceled != 0;
+
+ return io_flush_killed_timeouts(&list, -ECANCELED);
}
diff --git a/io_uring/timeout.h b/io_uring/timeout.h
index a6939f18313e..2b7c9ad72992 100644
--- a/io_uring/timeout.h
+++ b/io_uring/timeout.h
@@ -8,23 +8,10 @@ struct io_timeout_data {
u32 flags;
};
-struct io_kiocb *__io_disarm_linked_timeout(struct io_kiocb *req,
- struct io_kiocb *link);
-
-static inline struct io_kiocb *io_disarm_linked_timeout(struct io_kiocb *req)
-{
- struct io_kiocb *link = req->link;
-
- if (link && link->opcode == IORING_OP_LINK_TIMEOUT)
- return __io_disarm_linked_timeout(req, link);
-
- return NULL;
-}
-
__cold void io_flush_timeouts(struct io_ring_ctx *ctx);
struct io_cancel_data;
int io_timeout_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd);
-__cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
+__cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
bool cancel_all);
void io_queue_linked_timeout(struct io_kiocb *req);
void io_disarm_next(struct io_kiocb *req);
diff --git a/io_uring/truncate.c b/io_uring/truncate.c
new file mode 100644
index 000000000000..487baf23b44e
--- /dev/null
+++ b/io_uring/truncate.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/syscalls.h>
+#include <linux/io_uring.h>
+
+#include <uapi/linux/io_uring.h>
+
+#include "../fs/internal.h"
+
+#include "io_uring.h"
+#include "truncate.h"
+
+struct io_ftrunc {
+ struct file *file;
+ loff_t len;
+};
+
+int io_ftruncate_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+ struct io_ftrunc *ft = io_kiocb_to_cmd(req, struct io_ftrunc);
+
+ if (sqe->rw_flags || sqe->addr || sqe->len || sqe->buf_index ||
+ sqe->splice_fd_in || sqe->addr3)
+ return -EINVAL;
+
+ ft->len = READ_ONCE(sqe->off);
+
+ req->flags |= REQ_F_FORCE_ASYNC;
+ return 0;
+}
+
+int io_ftruncate(struct io_kiocb *req, unsigned int issue_flags)
+{
+ struct io_ftrunc *ft = io_kiocb_to_cmd(req, struct io_ftrunc);
+ int ret;
+
+ WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
+
+ ret = do_ftruncate(req->file, ft->len, 1);
+
+ io_req_set_res(req, ret, 0);
+ return IOU_COMPLETE;
+}
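A hedged userspace sketch of the new ftruncate request, using only the SQE fields read by io_ftruncate_prep() above (file in sqe->fd, new length in sqe->off); io_uring_get_sqe() is the liburing helper and an assumption here. Since the prep forces REQ_F_FORCE_ASYNC, the request always completes from io-wq context.

	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);	/* liburing helper, assumed */

	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_FTRUNCATE;
	sqe->fd = fd;			/* regular file to truncate */
	sqe->off = 4096;		/* new length, read from sqe->off in io_ftruncate_prep() */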
diff --git a/io_uring/truncate.h b/io_uring/truncate.h
new file mode 100644
index 000000000000..ec088293a478
--- /dev/null
+++ b/io_uring/truncate.h
@@ -0,0 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
+
+int io_ftruncate_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
+int io_ftruncate(struct io_kiocb *req, unsigned int issue_flags);
diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
index 446a189b78b0..197474911f04 100644
--- a/io_uring/uring_cmd.c
+++ b/io_uring/uring_cmd.c
@@ -2,105 +2,231 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
-#include <linux/io_uring.h>
+#include <linux/io_uring/cmd.h>
#include <linux/security.h>
#include <linux/nospec.h>
#include <uapi/linux/io_uring.h>
#include "io_uring.h"
+#include "alloc_cache.h"
#include "rsrc.h"
+#include "kbuf.h"
#include "uring_cmd.h"
+#include "poll.h"
-static void io_uring_cmd_work(struct io_kiocb *req, bool *locked)
+void io_cmd_cache_free(const void *entry)
+{
+ struct io_async_cmd *ac = (struct io_async_cmd *)entry;
+
+ io_vec_free(&ac->vec);
+ kfree(ac);
+}
+
+static void io_req_uring_cleanup(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
+ struct io_async_cmd *ac = req->async_data;
+
+ if (issue_flags & IO_URING_F_UNLOCKED)
+ return;
+
+ io_alloc_cache_vec_kasan(&ac->vec);
+ if (ac->vec.nr > IO_VEC_CACHE_SOFT_CAP)
+ io_vec_free(&ac->vec);
+
+ if (io_alloc_cache_put(&req->ctx->cmd_cache, ac)) {
+ ioucmd->sqe = NULL;
+ io_req_async_data_clear(req, REQ_F_NEED_CLEANUP);
+ }
+}
- ioucmd->task_work_cb(ioucmd);
+void io_uring_cmd_cleanup(struct io_kiocb *req)
+{
+ io_req_uring_cleanup(req, 0);
}
-void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
- void (*task_work_cb)(struct io_uring_cmd *))
+bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx,
+ struct io_uring_task *tctx, bool cancel_all)
+{
+ struct hlist_node *tmp;
+ struct io_kiocb *req;
+ bool ret = false;
+
+ lockdep_assert_held(&ctx->uring_lock);
+
+ hlist_for_each_entry_safe(req, tmp, &ctx->cancelable_uring_cmd,
+ hash_node) {
+ struct io_uring_cmd *cmd = io_kiocb_to_cmd(req,
+ struct io_uring_cmd);
+ struct file *file = req->file;
+
+ if (!cancel_all && req->tctx != tctx)
+ continue;
+
+ if (cmd->flags & IORING_URING_CMD_CANCELABLE) {
+ file->f_op->uring_cmd(cmd, IO_URING_F_CANCEL |
+ IO_URING_F_COMPLETE_DEFER);
+ ret = true;
+ }
+ }
+ io_submit_flush_completions(ctx);
+ return ret;
+}
+
+static void io_uring_cmd_del_cancelable(struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
+{
+ struct io_kiocb *req = cmd_to_io_kiocb(cmd);
+ struct io_ring_ctx *ctx = req->ctx;
+
+ if (!(cmd->flags & IORING_URING_CMD_CANCELABLE))
+ return;
+
+ cmd->flags &= ~IORING_URING_CMD_CANCELABLE;
+ io_ring_submit_lock(ctx, issue_flags);
+ hlist_del(&req->hash_node);
+ io_ring_submit_unlock(ctx, issue_flags);
+}
+
+/*
+ * Mark this command as cancelable, then io_uring_try_cancel_uring_cmd()
+ * will try to cancel this issued command by sending ->uring_cmd() with
+ * issue_flags of IO_URING_F_CANCEL.
+ *
+ * The command is guaranteed not to be done when ->uring_cmd() is called
+ * with IO_URING_F_CANCEL, but it is the driver's responsibility to deal
+ * with the race between io_uring cancellation and normal completion.
+ */
+void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
+{
+ struct io_kiocb *req = cmd_to_io_kiocb(cmd);
+ struct io_ring_ctx *ctx = req->ctx;
+
+ if (!(cmd->flags & IORING_URING_CMD_CANCELABLE)) {
+ cmd->flags |= IORING_URING_CMD_CANCELABLE;
+ io_ring_submit_lock(ctx, issue_flags);
+ hlist_add_head(&req->hash_node, &ctx->cancelable_uring_cmd);
+ io_ring_submit_unlock(ctx, issue_flags);
+ }
+}
+EXPORT_SYMBOL_GPL(io_uring_cmd_mark_cancelable);
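As a sketch of the intended flow in a ->uring_cmd() handler: mark the command cancelable once it has been queued and return -EIOCBQUEUED, then complete it when io_uring calls back with IO_URING_F_CANCEL. The handler and the my_drv_*() helpers are hypothetical; only io_uring_cmd_mark_cancelable(), IO_URING_F_CANCEL and the -EIOCBQUEUED convention come from this file.

static int my_drv_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
	if (issue_flags & IO_URING_F_CANCEL) {
		/* io_uring is cancelling: finish the command, e.g. with -ECANCELED */
		my_drv_abort(cmd);			/* hypothetical driver helper */
		return 0;
	}

	my_drv_queue(cmd);				/* hypothetical driver helper */
	/* the driver still has to handle the race between cancellation and completion */
	io_uring_cmd_mark_cancelable(cmd, issue_flags);
	return -EIOCBQUEUED;	/* completion arrives later via the __io_uring_cmd_done() path */
}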
+
+void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
+ io_req_tw_func_t task_work_cb,
+ unsigned flags)
{
struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
- ioucmd->task_work_cb = task_work_cb;
- req->io_task_work.func = io_uring_cmd_work;
- io_req_task_work_add(req);
+ if (WARN_ON_ONCE(req->flags & REQ_F_APOLL_MULTISHOT))
+ return;
+
+ req->io_task_work.func = task_work_cb;
+ __io_req_task_work_add(req, flags);
}
-EXPORT_SYMBOL_GPL(io_uring_cmd_complete_in_task);
+EXPORT_SYMBOL_GPL(__io_uring_cmd_do_in_task);
static inline void io_req_set_cqe32_extra(struct io_kiocb *req,
u64 extra1, u64 extra2)
{
- req->extra1 = extra1;
- req->extra2 = extra2;
- req->flags |= REQ_F_CQE32_INIT;
+ req->big_cqe.extra1 = extra1;
+ req->big_cqe.extra2 = extra2;
}
/*
* Called by consumers of io_uring_cmd, if they originally returned
* -EIOCBQUEUED upon receiving the command.
*/
-void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2)
+void __io_uring_cmd_done(struct io_uring_cmd *ioucmd, s32 ret, u64 res2,
+ unsigned issue_flags, bool is_cqe32)
{
struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
+ if (WARN_ON_ONCE(req->flags & REQ_F_APOLL_MULTISHOT))
+ return;
+
+ io_uring_cmd_del_cancelable(ioucmd, issue_flags);
+
if (ret < 0)
req_set_fail(req);
io_req_set_res(req, ret, 0);
- if (req->ctx->flags & IORING_SETUP_CQE32)
+ if (is_cqe32) {
+ if (req->ctx->flags & IORING_SETUP_CQE_MIXED)
+ req->cqe.flags |= IORING_CQE_F_32;
io_req_set_cqe32_extra(req, res2, 0);
- if (req->ctx->flags & IORING_SETUP_IOPOLL)
+ }
+ io_req_uring_cleanup(req, issue_flags);
+ if (req->ctx->flags & IORING_SETUP_IOPOLL) {
/* order with io_iopoll_req_issued() checking ->iopoll_completed */
smp_store_release(&req->iopoll_completed, 1);
- else
- io_req_complete_post(req, 0);
-}
-EXPORT_SYMBOL_GPL(io_uring_cmd_done);
-
-int io_uring_cmd_prep_async(struct io_kiocb *req)
-{
- struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
- size_t cmd_size;
-
- BUILD_BUG_ON(uring_cmd_pdu_size(0) != 16);
- BUILD_BUG_ON(uring_cmd_pdu_size(1) != 80);
-
- cmd_size = uring_cmd_pdu_size(req->ctx->flags & IORING_SETUP_SQE128);
-
- memcpy(req->async_data, ioucmd->cmd, cmd_size);
- return 0;
+ } else if (issue_flags & IO_URING_F_COMPLETE_DEFER) {
+ if (WARN_ON_ONCE(issue_flags & IO_URING_F_UNLOCKED))
+ return;
+ io_req_complete_defer(req);
+ } else {
+ req->io_task_work.func = io_req_task_complete;
+ io_req_task_work_add(req);
+ }
}
+EXPORT_SYMBOL_GPL(__io_uring_cmd_done);
int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
+ struct io_async_cmd *ac;
if (sqe->__pad1)
return -EINVAL;
ioucmd->flags = READ_ONCE(sqe->uring_cmd_flags);
- if (ioucmd->flags & ~IORING_URING_CMD_FIXED)
+ if (ioucmd->flags & ~IORING_URING_CMD_MASK)
return -EINVAL;
if (ioucmd->flags & IORING_URING_CMD_FIXED) {
- struct io_ring_ctx *ctx = req->ctx;
- u16 index;
-
+ if (ioucmd->flags & IORING_URING_CMD_MULTISHOT)
+ return -EINVAL;
req->buf_index = READ_ONCE(sqe->buf_index);
- if (unlikely(req->buf_index >= ctx->nr_user_bufs))
- return -EFAULT;
- index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
- req->imu = ctx->user_bufs[index];
- io_req_set_rsrc_node(req, ctx, 0);
}
- ioucmd->cmd = sqe->cmd;
+
+ if (!!(ioucmd->flags & IORING_URING_CMD_MULTISHOT) !=
+ !!(req->flags & REQ_F_BUFFER_SELECT))
+ return -EINVAL;
+
ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);
+
+ ac = io_uring_alloc_async_data(&req->ctx->cmd_cache, req);
+ if (!ac)
+ return -ENOMEM;
+ ioucmd->sqe = sqe;
return 0;
}
+/*
+ * IORING_SETUP_SQE128 contexts allocate twice the normal SQE size for each
+ * slot.
+ */
+static inline size_t uring_sqe_size(struct io_kiocb *req)
+{
+ if (req->ctx->flags & IORING_SETUP_SQE128 ||
+ req->opcode == IORING_OP_URING_CMD128)
+ return 2 * sizeof(struct io_uring_sqe);
+ return sizeof(struct io_uring_sqe);
+}
+
+void io_uring_cmd_sqe_copy(struct io_kiocb *req)
+{
+ struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
+ struct io_async_cmd *ac = req->async_data;
+
+ /* Should not happen, as REQ_F_SQE_COPIED covers this */
+ if (WARN_ON_ONCE(ioucmd->sqe == ac->sqes))
+ return;
+ memcpy(ac->sqes, ioucmd->sqe, uring_sqe_size(req));
+ ioucmd->sqe = ac->sqes;
+}
+
int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
@@ -108,51 +234,167 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
struct file *file = req->file;
int ret;
- if (!req->file->f_op->uring_cmd)
+ if (!file->f_op->uring_cmd)
return -EOPNOTSUPP;
ret = security_uring_cmd(ioucmd);
if (ret)
return ret;
- if (ctx->flags & IORING_SETUP_SQE128)
+ if (ctx->flags & IORING_SETUP_SQE128 ||
+ req->opcode == IORING_OP_URING_CMD128)
issue_flags |= IO_URING_F_SQE128;
- if (ctx->flags & IORING_SETUP_CQE32)
+ if (ctx->flags & (IORING_SETUP_CQE32 | IORING_SETUP_CQE_MIXED))
issue_flags |= IO_URING_F_CQE32;
+ if (io_is_compat(ctx))
+ issue_flags |= IO_URING_F_COMPAT;
if (ctx->flags & IORING_SETUP_IOPOLL) {
+ if (!file->f_op->uring_cmd_iopoll)
+ return -EOPNOTSUPP;
issue_flags |= IO_URING_F_IOPOLL;
req->iopoll_completed = 0;
- WRITE_ONCE(ioucmd->cookie, NULL);
+ if (ctx->flags & IORING_SETUP_HYBRID_IOPOLL) {
+ /* make sure every req only blocks once */
+ req->flags &= ~REQ_F_IOPOLL_STATE;
+ req->iopoll_start = ktime_get_ns();
+ }
}
- if (req_has_async_data(req))
- ioucmd->cmd = req->async_data;
-
ret = file->f_op->uring_cmd(ioucmd, issue_flags);
- if (ret == -EAGAIN) {
- if (!req_has_async_data(req)) {
- if (io_alloc_async_data(req))
- return -ENOMEM;
- io_uring_cmd_prep_async(req);
- }
- return -EAGAIN;
+ if (ioucmd->flags & IORING_URING_CMD_MULTISHOT) {
+ if (ret >= 0)
+ return IOU_ISSUE_SKIP_COMPLETE;
}
-
- if (ret != -EIOCBQUEUED) {
- if (ret < 0)
- req_set_fail(req);
- io_req_set_res(req, ret, 0);
+ if (ret == -EAGAIN) {
+ ioucmd->flags |= IORING_URING_CMD_REISSUE;
return ret;
}
-
- return IOU_ISSUE_SKIP_COMPLETE;
+ if (ret == -EIOCBQUEUED)
+ return ret;
+ if (ret < 0)
+ req_set_fail(req);
+ io_req_uring_cleanup(req, issue_flags);
+ io_req_set_res(req, ret, 0);
+ return IOU_COMPLETE;
}
int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
- struct iov_iter *iter, void *ioucmd)
+ struct iov_iter *iter,
+ struct io_uring_cmd *ioucmd,
+ unsigned int issue_flags)
{
struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
- return io_import_fixed(rw, iter, req->imu, ubuf, len);
+ if (WARN_ON_ONCE(!(ioucmd->flags & IORING_URING_CMD_FIXED)))
+ return -EINVAL;
+
+ return io_import_reg_buf(req, iter, ubuf, len, rw, issue_flags);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_import_fixed);
+
+int io_uring_cmd_import_fixed_vec(struct io_uring_cmd *ioucmd,
+ const struct iovec __user *uvec,
+ size_t uvec_segs,
+ int ddir, struct iov_iter *iter,
+ unsigned issue_flags)
+{
+ struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
+ struct io_async_cmd *ac = req->async_data;
+ int ret;
+
+ if (WARN_ON_ONCE(!(ioucmd->flags & IORING_URING_CMD_FIXED)))
+ return -EINVAL;
+
+ ret = io_prep_reg_iovec(req, &ac->vec, uvec, uvec_segs);
+ if (ret)
+ return ret;
+
+ return io_import_reg_vec(ddir, iter, req, &ac->vec, uvec_segs,
+ issue_flags);
+}
+EXPORT_SYMBOL_GPL(io_uring_cmd_import_fixed_vec);
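A hedged driver-side sketch of importing a registered buffer inside a ->uring_cmd() handler; ubuf and len are assumptions (they would normally be decoded from the driver payload at ioucmd->sqe->cmd), while the call and its return convention are as above. The request must have been submitted with IORING_URING_CMD_FIXED and a valid buf_index; ITER_DEST here means data is read into the buffer.

	struct iov_iter iter;
	int ret;

	ret = io_uring_cmd_import_fixed(ubuf, len, ITER_DEST, &iter, ioucmd, issue_flags);
	if (ret)
		return ret;
	/* 'iter' now describes the registered buffer; hand it to the usual iov_iter based I/O path */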
+
+void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd)
+{
+ struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
+
+ io_req_queue_iowq(req);
+}
+
+int io_cmd_poll_multishot(struct io_uring_cmd *cmd,
+ unsigned int issue_flags, __poll_t mask)
+{
+ struct io_kiocb *req = cmd_to_io_kiocb(cmd);
+ int ret;
+
+ if (likely(req->flags & REQ_F_APOLL_MULTISHOT))
+ return 0;
+
+ req->flags |= REQ_F_APOLL_MULTISHOT;
+ mask &= ~EPOLLONESHOT;
+
+ ret = io_arm_apoll(req, issue_flags, mask);
+ return ret == IO_APOLL_OK ? -EIOCBQUEUED : -ECANCELED;
+}
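A sketch of how a multishot-capable ->uring_cmd() handler might use this when nothing is ready at issue time; the surrounding handler and the POLLIN mask are assumptions, while io_cmd_poll_multishot() and its return convention are as above.

	/* inside a hypothetical ->uring_cmd() handler, no data to report yet */
	ret = io_cmd_poll_multishot(cmd, issue_flags, POLLIN);
	/*
	 * -EIOCBQUEUED: poll is armed and the command will be re-issued on readiness.
	 * -ECANCELED:   arming failed; returning it fails the request.
	 */
	return ret;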
+
+bool io_uring_cmd_post_mshot_cqe32(struct io_uring_cmd *cmd,
+ unsigned int issue_flags,
+ struct io_uring_cqe cqe[2])
+{
+ struct io_kiocb *req = cmd_to_io_kiocb(cmd);
+
+ if (WARN_ON_ONCE(!(issue_flags & IO_URING_F_MULTISHOT)))
+ return false;
+ return io_req_post_cqe32(req, cqe);
+}
+
+/*
+ * Works together with io_uring_mshot_cmd_post_cqe() for committing the
+ * provided buffer upfront.
+ */
+struct io_br_sel io_uring_cmd_buffer_select(struct io_uring_cmd *ioucmd,
+ unsigned buf_group, size_t *len,
+ unsigned int issue_flags)
+{
+ struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
+
+ if (!(ioucmd->flags & IORING_URING_CMD_MULTISHOT))
+ return (struct io_br_sel) { .val = -EINVAL };
+
+ if (WARN_ON_ONCE(!io_do_buffer_select(req)))
+ return (struct io_br_sel) { .val = -EINVAL };
+
+ return io_buffer_select(req, len, buf_group, issue_flags);
+}
+EXPORT_SYMBOL_GPL(io_uring_cmd_buffer_select);
+
+/*
+ * Returns true if this multishot uring_cmd needs to be completed; otherwise
+ * the event CQE has been posted successfully.
+ *
+ * This function must be given the `struct io_br_sel` returned from
+ * io_uring_cmd_buffer_select(), so that the buffer is committed in the same
+ * uring_cmd submission context.
+ */
+bool io_uring_mshot_cmd_post_cqe(struct io_uring_cmd *ioucmd,
+ struct io_br_sel *sel, unsigned int issue_flags)
+{
+ struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
+ unsigned int cflags = 0;
+
+ if (!(ioucmd->flags & IORING_URING_CMD_MULTISHOT))
+ return true;
+
+ if (sel->val > 0) {
+ cflags = io_put_kbuf(req, sel->val, sel->buf_list);
+ if (io_req_post_cqe(req, sel->val, cflags | IORING_CQE_F_MORE))
+ return false;
+ }
+
+ io_kbuf_recycle(req, sel->buf_list, issue_flags);
+ if (sel->val < 0)
+ req_set_fail(req);
+ io_req_set_res(req, sel->val, cflags);
+ return true;
+}
+EXPORT_SYMBOL_GPL(io_uring_mshot_cmd_post_cqe);
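A hedged sketch of the driver-side pattern the two comments above describe: select a buffer, produce up to 'len' bytes of event data into it (elided here), record the amount in sel.val, and post the multishot CQE. The buf_group value, the 'copied' count and where this runs in a driver are assumptions; the calls and the meaning of the return value come from this file.

	size_t len = 0;
	struct io_br_sel sel;

	sel = io_uring_cmd_buffer_select(ioucmd, buf_group, &len, issue_flags);
	if (sel.val < 0)
		return sel.val;

	/* fill the selected buffer with up to 'len' bytes of event data (elided) */
	sel.val = copied;

	if (io_uring_mshot_cmd_post_cqe(ioucmd, &sel, issue_flags)) {
		/* no event CQE was posted: the command itself must now complete,
		 * its result has already been set via io_req_set_res()
		 */
	} else {
		/* event CQE posted with IORING_CQE_F_MORE; the command stays armed */
	}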
diff --git a/io_uring/uring_cmd.h b/io_uring/uring_cmd.h
index 7c6697d13cb2..041aef8a8aa3 100644
--- a/io_uring/uring_cmd.h
+++ b/io_uring/uring_cmd.h
@@ -1,13 +1,26 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/io_uring/cmd.h>
+#include <linux/io_uring_types.h>
+
+struct io_async_cmd {
+ struct iou_vec vec;
+ struct io_uring_sqe sqes[2];
+};
+
int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags);
int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
-int io_uring_cmd_prep_async(struct io_kiocb *req);
-
-/*
- * The URING_CMD payload starts at 'cmd' in the first sqe, and continues into
- * the following sqe if SQE128 is used.
- */
-#define uring_cmd_pdu_size(is_sqe128) \
- ((1 + !!(is_sqe128)) * sizeof(struct io_uring_sqe) - \
- offsetof(struct io_uring_sqe, cmd))
+void io_uring_cmd_sqe_copy(struct io_kiocb *req);
+void io_uring_cmd_cleanup(struct io_kiocb *req);
+
+bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx,
+ struct io_uring_task *tctx, bool cancel_all);
+
+bool io_uring_cmd_post_mshot_cqe32(struct io_uring_cmd *cmd,
+ unsigned int issue_flags,
+ struct io_uring_cqe cqe[2]);
+
+void io_cmd_cache_free(const void *entry);
+
+int io_cmd_poll_multishot(struct io_uring_cmd *cmd,
+ unsigned int issue_flags, __poll_t mask);
diff --git a/io_uring/waitid.c b/io_uring/waitid.c
new file mode 100644
index 000000000000..2d4cbd47c67c
--- /dev/null
+++ b/io_uring/waitid.c
@@ -0,0 +1,348 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for async notification of waitid
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/compat.h>
+#include <linux/io_uring.h>
+
+#include <uapi/linux/io_uring.h>
+
+#include "io_uring.h"
+#include "cancel.h"
+#include "waitid.h"
+#include "../kernel/exit.h"
+
+static void io_waitid_cb(struct io_tw_req tw_req, io_tw_token_t tw);
+
+#define IO_WAITID_CANCEL_FLAG BIT(31)
+#define IO_WAITID_REF_MASK GENMASK(30, 0)
+
+struct io_waitid {
+ struct file *file;
+ int which;
+ pid_t upid;
+ int options;
+ atomic_t refs;
+ struct wait_queue_head *head;
+ struct siginfo __user *infop;
+ struct waitid_info info;
+};
+
+static void io_waitid_free(struct io_kiocb *req)
+{
+ struct io_waitid_async *iwa = req->async_data;
+
+ put_pid(iwa->wo.wo_pid);
+ io_req_async_data_free(req);
+}
+
+static bool io_waitid_compat_copy_si(struct io_waitid *iw, int signo)
+{
+ struct compat_siginfo __user *infop;
+ bool ret;
+
+ infop = (struct compat_siginfo __user *) iw->infop;
+
+ if (!user_write_access_begin(infop, sizeof(*infop)))
+ return false;
+
+ unsafe_put_user(signo, &infop->si_signo, Efault);
+ unsafe_put_user(0, &infop->si_errno, Efault);
+ unsafe_put_user(iw->info.cause, &infop->si_code, Efault);
+ unsafe_put_user(iw->info.pid, &infop->si_pid, Efault);
+ unsafe_put_user(iw->info.uid, &infop->si_uid, Efault);
+ unsafe_put_user(iw->info.status, &infop->si_status, Efault);
+ ret = true;
+done:
+ user_write_access_end();
+ return ret;
+Efault:
+ ret = false;
+ goto done;
+}
+
+static bool io_waitid_copy_si(struct io_kiocb *req, int signo)
+{
+ struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
+ bool ret;
+
+ if (!iw->infop)
+ return true;
+
+ if (io_is_compat(req->ctx))
+ return io_waitid_compat_copy_si(iw, signo);
+
+ if (!user_write_access_begin(iw->infop, sizeof(*iw->infop)))
+ return false;
+
+ unsafe_put_user(signo, &iw->infop->si_signo, Efault);
+ unsafe_put_user(0, &iw->infop->si_errno, Efault);
+ unsafe_put_user(iw->info.cause, &iw->infop->si_code, Efault);
+ unsafe_put_user(iw->info.pid, &iw->infop->si_pid, Efault);
+ unsafe_put_user(iw->info.uid, &iw->infop->si_uid, Efault);
+ unsafe_put_user(iw->info.status, &iw->infop->si_status, Efault);
+ ret = true;
+done:
+ user_write_access_end();
+ return ret;
+Efault:
+ ret = false;
+ goto done;
+}
+
+static int io_waitid_finish(struct io_kiocb *req, int ret)
+{
+ int signo = 0;
+
+ if (ret > 0) {
+ signo = SIGCHLD;
+ ret = 0;
+ }
+
+ if (!io_waitid_copy_si(req, signo))
+ ret = -EFAULT;
+ io_waitid_free(req);
+ return ret;
+}
+
+static void io_waitid_remove_wq(struct io_kiocb *req)
+{
+ struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
+ struct wait_queue_head *head;
+
+ head = READ_ONCE(iw->head);
+ if (head) {
+ struct io_waitid_async *iwa = req->async_data;
+
+ iw->head = NULL;
+ spin_lock_irq(&head->lock);
+ list_del_init(&iwa->wo.child_wait.entry);
+ spin_unlock_irq(&head->lock);
+ }
+}
+
+static void io_waitid_complete(struct io_kiocb *req, int ret)
+{
+ struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
+
+ /* anyone completing better be holding a reference */
+ WARN_ON_ONCE(!(atomic_read(&iw->refs) & IO_WAITID_REF_MASK));
+
+ lockdep_assert_held(&req->ctx->uring_lock);
+
+ hlist_del_init(&req->hash_node);
+ io_waitid_remove_wq(req);
+
+ ret = io_waitid_finish(req, ret);
+ if (ret < 0)
+ req_set_fail(req);
+ io_req_set_res(req, ret, 0);
+}
+
+static bool __io_waitid_cancel(struct io_kiocb *req)
+{
+ struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
+
+ lockdep_assert_held(&req->ctx->uring_lock);
+
+ /*
+ * Mark us canceled regardless of ownership. This will prevent a
+ * potential retry from a spurious wakeup.
+ */
+ atomic_or(IO_WAITID_CANCEL_FLAG, &iw->refs);
+
+ /* claim ownership */
+ if (atomic_fetch_inc(&iw->refs) & IO_WAITID_REF_MASK)
+ return false;
+
+ io_waitid_complete(req, -ECANCELED);
+ io_req_queue_tw_complete(req, -ECANCELED);
+ return true;
+}
+
+int io_waitid_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
+ unsigned int issue_flags)
+{
+ return io_cancel_remove(ctx, cd, issue_flags, &ctx->waitid_list, __io_waitid_cancel);
+}
+
+bool io_waitid_remove_all(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
+ bool cancel_all)
+{
+ return io_cancel_remove_all(ctx, tctx, &ctx->waitid_list, cancel_all, __io_waitid_cancel);
+}
+
+static inline bool io_waitid_drop_issue_ref(struct io_kiocb *req)
+{
+ struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
+
+ if (!atomic_sub_return(1, &iw->refs))
+ return false;
+
+ io_waitid_remove_wq(req);
+
+ /*
+ * A wakeup triggered and is racing with us. It was prevented from
+ * completing because of that, so queue up the tw to finish the request.
+ */
+ req->io_task_work.func = io_waitid_cb;
+ io_req_task_work_add(req);
+ return true;
+}
+
+static void io_waitid_cb(struct io_tw_req tw_req, io_tw_token_t tw)
+{
+ struct io_kiocb *req = tw_req.req;
+ struct io_waitid_async *iwa = req->async_data;
+ struct io_ring_ctx *ctx = req->ctx;
+ int ret;
+
+ io_tw_lock(ctx, tw);
+
+ ret = __do_wait(&iwa->wo);
+
+ /*
+ * If we get -ERESTARTSYS here, we need to re-arm and check again
+ * to ensure we get another callback. If the retry works, then we can
+ * just remove ourselves from the waitqueue again and finish the
+ * request.
+ */
+ if (unlikely(ret == -ERESTARTSYS)) {
+ struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
+
+ /* Don't retry if cancel found it meanwhile */
+ ret = -ECANCELED;
+ if (!(atomic_read(&iw->refs) & IO_WAITID_CANCEL_FLAG)) {
+ iw->head = &current->signal->wait_chldexit;
+ add_wait_queue(iw->head, &iwa->wo.child_wait);
+ ret = __do_wait(&iwa->wo);
+ if (ret == -ERESTARTSYS) {
+ /* retry armed, drop our ref */
+ io_waitid_drop_issue_ref(req);
+ return;
+ }
+ /* fall through to complete, will kill waitqueue */
+ }
+ }
+
+ io_waitid_complete(req, ret);
+ io_req_task_complete(tw_req, tw);
+}
+
+static int io_waitid_wait(struct wait_queue_entry *wait, unsigned mode,
+ int sync, void *key)
+{
+ struct wait_opts *wo = container_of(wait, struct wait_opts, child_wait);
+ struct io_waitid_async *iwa = container_of(wo, struct io_waitid_async, wo);
+ struct io_kiocb *req = iwa->req;
+ struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
+ struct task_struct *p = key;
+
+ if (!pid_child_should_wake(wo, p))
+ return 0;
+
+ list_del_init(&wait->entry);
+ iw->head = NULL;
+
+ /* cancel is in progress */
+ if (atomic_fetch_inc(&iw->refs) & IO_WAITID_REF_MASK)
+ return 1;
+
+ req->io_task_work.func = io_waitid_cb;
+ io_req_task_work_add(req);
+ return 1;
+}
+
+int io_waitid_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+ struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
+ struct io_waitid_async *iwa;
+
+ if (sqe->addr || sqe->buf_index || sqe->addr3 || sqe->waitid_flags)
+ return -EINVAL;
+
+ iwa = io_uring_alloc_async_data(NULL, req);
+ if (unlikely(!iwa))
+ return -ENOMEM;
+ iwa->req = req;
+
+ iw->which = READ_ONCE(sqe->len);
+ iw->upid = READ_ONCE(sqe->fd);
+ iw->options = READ_ONCE(sqe->file_index);
+ iw->head = NULL;
+ iw->infop = u64_to_user_ptr(READ_ONCE(sqe->addr2));
+ return 0;
+}
+
+int io_waitid(struct io_kiocb *req, unsigned int issue_flags)
+{
+ struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
+ struct io_waitid_async *iwa = req->async_data;
+ struct io_ring_ctx *ctx = req->ctx;
+ int ret;
+
+ ret = kernel_waitid_prepare(&iwa->wo, iw->which, iw->upid, &iw->info,
+ iw->options, NULL);
+ if (ret)
+ goto done;
+
+ /*
+ * Mark the request as busy upfront, in case we're racing with the
+ * wakeup. If we are, then we'll notice when we drop this initial
+ * reference again after arming.
+ */
+ atomic_set(&iw->refs, 1);
+
+ /*
+ * Cancel must hold the ctx lock, so there's no risk of cancelation
+ * finding us until a) we are on the list, and b) the lock has been
+ * dropped. We only need to worry about racing with the wakeup
+ * callback.
+ */
+ io_ring_submit_lock(ctx, issue_flags);
+
+ /*
+ * iw->head is valid under the ring lock, and as long as the request
+ * is on the waitid_list where cancelations may find it.
+ */
+ iw->head = &current->signal->wait_chldexit;
+ hlist_add_head(&req->hash_node, &ctx->waitid_list);
+
+ init_waitqueue_func_entry(&iwa->wo.child_wait, io_waitid_wait);
+ iwa->wo.child_wait.private = req->tctx->task;
+ add_wait_queue(iw->head, &iwa->wo.child_wait);
+
+ ret = __do_wait(&iwa->wo);
+ if (ret == -ERESTARTSYS) {
+ /*
+ * Nobody else grabbed a reference, it'll complete when we get
+ * a waitqueue callback, or if someone cancels it.
+ */
+ if (!io_waitid_drop_issue_ref(req)) {
+ io_ring_submit_unlock(ctx, issue_flags);
+ return IOU_ISSUE_SKIP_COMPLETE;
+ }
+
+ /*
+ * A wakeup triggered and is racing with us. It was prevented from
+ * completing because of that, so queue up the tw to finish the request.
+ */
+ io_ring_submit_unlock(ctx, issue_flags);
+ return IOU_ISSUE_SKIP_COMPLETE;
+ }
+
+ hlist_del_init(&req->hash_node);
+ io_waitid_remove_wq(req);
+ ret = io_waitid_finish(req, ret);
+
+ io_ring_submit_unlock(ctx, issue_flags);
+done:
+ if (ret < 0)
+ req_set_fail(req);
+ io_req_set_res(req, ret, 0);
+ return IOU_COMPLETE;
+}
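A minimal userspace sketch of issuing IORING_OP_WAITID, mirroring the SQE fields read by io_waitid_prep() above ('which' in sqe->len, pid in sqe->fd, options in sqe->file_index, optional siginfo pointer in sqe->addr2); io_uring_get_sqe() is the liburing helper and an assumption of the example.

	siginfo_t si;
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);	/* liburing helper, assumed */

	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_WAITID;
	sqe->len = P_PID;			/* idtype, read from sqe->len */
	sqe->fd = child_pid;			/* upid */
	sqe->file_index = WEXITED;		/* waitid options */
	sqe->addr2 = (unsigned long)&si;	/* siginfo destination, may be left 0 */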
diff --git a/io_uring/waitid.h b/io_uring/waitid.h
new file mode 100644
index 000000000000..d5544aaf302a
--- /dev/null
+++ b/io_uring/waitid.h
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "../kernel/exit.h"
+
+struct io_waitid_async {
+ struct io_kiocb *req;
+ struct wait_opts wo;
+};
+
+int io_waitid_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
+int io_waitid(struct io_kiocb *req, unsigned int issue_flags);
+int io_waitid_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
+ unsigned int issue_flags);
+bool io_waitid_remove_all(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
+ bool cancel_all);
diff --git a/io_uring/xattr.c b/io_uring/xattr.c
index 6201a9f442c6..322b94ff9e4b 100644
--- a/io_uring/xattr.c
+++ b/io_uring/xattr.c
@@ -18,7 +18,7 @@
struct io_xattr {
struct file *file;
- struct xattr_ctx ctx;
+ struct kernel_xattr_ctx ctx;
struct filename *filename;
};
@@ -48,13 +48,10 @@ static int __io_getxattr_prep(struct io_kiocb *req,
const char __user *name;
int ret;
- if (unlikely(req->flags & REQ_F_FIXED_FILE))
- return -EBADF;
-
ix->filename = NULL;
ix->ctx.kvalue = NULL;
name = u64_to_user_ptr(READ_ONCE(sqe->addr));
- ix->ctx.cvalue = u64_to_user_ptr(READ_ONCE(sqe->addr2));
+ ix->ctx.value = u64_to_user_ptr(READ_ONCE(sqe->addr2));
ix->ctx.size = READ_ONCE(sqe->len);
ix->ctx.flags = READ_ONCE(sqe->xattr_flags);
@@ -65,16 +62,14 @@ static int __io_getxattr_prep(struct io_kiocb *req,
if (!ix->ctx.kname)
return -ENOMEM;
- ret = strncpy_from_user(ix->ctx.kname->name, name,
- sizeof(ix->ctx.kname->name));
- if (!ret || ret == sizeof(ix->ctx.kname->name))
- ret = -ERANGE;
- if (ret < 0) {
+ ret = import_xattr_name(ix->ctx.kname, name);
+ if (ret) {
kfree(ix->ctx.kname);
return ret;
}
req->flags |= REQ_F_NEED_CLEANUP;
+ req->flags |= REQ_F_FORCE_ASYNC;
return 0;
}
@@ -89,19 +84,20 @@ int io_getxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
const char __user *path;
int ret;
+ if (unlikely(req->flags & REQ_F_FIXED_FILE))
+ return -EBADF;
+
ret = __io_getxattr_prep(req, sqe);
if (ret)
return ret;
path = u64_to_user_ptr(READ_ONCE(sqe->addr3));
- ix->filename = getname_flags(path, LOOKUP_FOLLOW, NULL);
- if (IS_ERR(ix->filename)) {
- ret = PTR_ERR(ix->filename);
- ix->filename = NULL;
- }
+ ix->filename = getname(path);
+ if (IS_ERR(ix->filename))
+ return PTR_ERR(ix->filename);
- return ret;
+ return 0;
}
int io_fgetxattr(struct io_kiocb *req, unsigned int issue_flags)
@@ -109,41 +105,24 @@ int io_fgetxattr(struct io_kiocb *req, unsigned int issue_flags)
struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);
int ret;
- if (issue_flags & IO_URING_F_NONBLOCK)
- return -EAGAIN;
-
- ret = do_getxattr(mnt_idmap(req->file->f_path.mnt),
- req->file->f_path.dentry,
- &ix->ctx);
+ WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
+ ret = file_getxattr(req->file, &ix->ctx);
io_xattr_finish(req, ret);
- return IOU_OK;
+ return IOU_COMPLETE;
}
int io_getxattr(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);
- unsigned int lookup_flags = LOOKUP_FOLLOW;
- struct path path;
int ret;
- if (issue_flags & IO_URING_F_NONBLOCK)
- return -EAGAIN;
-
-retry:
- ret = filename_lookup(AT_FDCWD, ix->filename, lookup_flags, &path, NULL);
- if (!ret) {
- ret = do_getxattr(mnt_idmap(path.mnt), path.dentry, &ix->ctx);
-
- path_put(&path);
- if (retry_estale(ret, lookup_flags)) {
- lookup_flags |= LOOKUP_REVAL;
- goto retry;
- }
- }
+ WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
+ ret = filename_getxattr(AT_FDCWD, ix->filename, LOOKUP_FOLLOW, &ix->ctx);
+ ix->filename = NULL;
io_xattr_finish(req, ret);
- return IOU_OK;
+ return IOU_COMPLETE;
}
static int __io_setxattr_prep(struct io_kiocb *req,
@@ -153,9 +132,6 @@ static int __io_setxattr_prep(struct io_kiocb *req,
const char __user *name;
int ret;
- if (unlikely(req->flags & REQ_F_FIXED_FILE))
- return -EBADF;
-
ix->filename = NULL;
name = u64_to_user_ptr(READ_ONCE(sqe->addr));
ix->ctx.cvalue = u64_to_user_ptr(READ_ONCE(sqe->addr2));
@@ -174,6 +150,7 @@ static int __io_setxattr_prep(struct io_kiocb *req,
}
req->flags |= REQ_F_NEED_CLEANUP;
+ req->flags |= REQ_F_FORCE_ASYNC;
return 0;
}
@@ -183,19 +160,20 @@ int io_setxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
const char __user *path;
int ret;
+ if (unlikely(req->flags & REQ_F_FIXED_FILE))
+ return -EBADF;
+
ret = __io_setxattr_prep(req, sqe);
if (ret)
return ret;
path = u64_to_user_ptr(READ_ONCE(sqe->addr3));
- ix->filename = getname_flags(path, LOOKUP_FOLLOW, NULL);
- if (IS_ERR(ix->filename)) {
- ret = PTR_ERR(ix->filename);
- ix->filename = NULL;
- }
+ ix->filename = getname(path);
+ if (IS_ERR(ix->filename))
+ return PTR_ERR(ix->filename);
- return ret;
+ return 0;
}
int io_fsetxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
@@ -203,54 +181,27 @@ int io_fsetxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return __io_setxattr_prep(req, sqe);
}
-static int __io_setxattr(struct io_kiocb *req, unsigned int issue_flags,
- const struct path *path)
-{
- struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);
- int ret;
-
- ret = mnt_want_write(path->mnt);
- if (!ret) {
- ret = do_setxattr(mnt_idmap(path->mnt), path->dentry, &ix->ctx);
- mnt_drop_write(path->mnt);
- }
-
- return ret;
-}
-
int io_fsetxattr(struct io_kiocb *req, unsigned int issue_flags)
{
+ struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);
int ret;
- if (issue_flags & IO_URING_F_NONBLOCK)
- return -EAGAIN;
+ WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
- ret = __io_setxattr(req, issue_flags, &req->file->f_path);
+ ret = file_setxattr(req->file, &ix->ctx);
io_xattr_finish(req, ret);
- return IOU_OK;
+ return IOU_COMPLETE;
}
int io_setxattr(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);
- unsigned int lookup_flags = LOOKUP_FOLLOW;
- struct path path;
int ret;
- if (issue_flags & IO_URING_F_NONBLOCK)
- return -EAGAIN;
-
-retry:
- ret = filename_lookup(AT_FDCWD, ix->filename, lookup_flags, &path, NULL);
- if (!ret) {
- ret = __io_setxattr(req, issue_flags, &path);
- path_put(&path);
- if (retry_estale(ret, lookup_flags)) {
- lookup_flags |= LOOKUP_REVAL;
- goto retry;
- }
- }
+ WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
+ ret = filename_setxattr(AT_FDCWD, ix->filename, LOOKUP_FOLLOW, &ix->ctx);
+ ix->filename = NULL;
io_xattr_finish(req, ret);
- return IOU_OK;
+ return IOU_COMPLETE;
}
diff --git a/io_uring/zcrx.c b/io_uring/zcrx.c
new file mode 100644
index 000000000000..b99cf2c6670a
--- /dev/null
+++ b/io_uring/zcrx.c
@@ -0,0 +1,1485 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/dma-map-ops.h>
+#include <linux/mm.h>
+#include <linux/nospec.h>
+#include <linux/io_uring.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/skbuff_ref.h>
+#include <linux/anon_inodes.h>
+
+#include <net/page_pool/helpers.h>
+#include <net/page_pool/memory_provider.h>
+#include <net/netlink.h>
+#include <net/netdev_queues.h>
+#include <net/netdev_rx_queue.h>
+#include <net/tcp.h>
+#include <net/rps.h>
+
+#include <trace/events/page_pool.h>
+
+#include <uapi/linux/io_uring.h>
+
+#include "io_uring.h"
+#include "kbuf.h"
+#include "memmap.h"
+#include "zcrx.h"
+#include "rsrc.h"
+
+#define IO_ZCRX_AREA_SUPPORTED_FLAGS (IORING_ZCRX_AREA_DMABUF)
+
+#define IO_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+
+static inline struct io_zcrx_ifq *io_pp_to_ifq(struct page_pool *pp)
+{
+ return pp->mp_priv;
+}
+
+static inline struct io_zcrx_area *io_zcrx_iov_to_area(const struct net_iov *niov)
+{
+ struct net_iov_area *owner = net_iov_owner(niov);
+
+ return container_of(owner, struct io_zcrx_area, nia);
+}
+
+static inline struct page *io_zcrx_iov_page(const struct net_iov *niov)
+{
+ struct io_zcrx_area *area = io_zcrx_iov_to_area(niov);
+ unsigned niov_pages_shift;
+
+ lockdep_assert(!area->mem.is_dmabuf);
+
+ niov_pages_shift = area->ifq->niov_shift - PAGE_SHIFT;
+ return area->mem.pages[net_iov_idx(niov) << niov_pages_shift];
+}
+
+static int io_populate_area_dma(struct io_zcrx_ifq *ifq,
+ struct io_zcrx_area *area)
+{
+ unsigned niov_size = 1U << ifq->niov_shift;
+ struct sg_table *sgt = area->mem.sgt;
+ struct scatterlist *sg;
+ unsigned i, niov_idx = 0;
+
+ for_each_sgtable_dma_sg(sgt, sg, i) {
+ dma_addr_t dma = sg_dma_address(sg);
+ unsigned long sg_len = sg_dma_len(sg);
+
+ if (WARN_ON_ONCE(sg_len % niov_size))
+ return -EINVAL;
+
+ while (sg_len && niov_idx < area->nia.num_niovs) {
+ struct net_iov *niov = &area->nia.niovs[niov_idx];
+
+ if (net_mp_niov_set_dma_addr(niov, dma))
+ return -EFAULT;
+ sg_len -= niov_size;
+ dma += niov_size;
+ niov_idx++;
+ }
+ }
+
+ if (WARN_ON_ONCE(niov_idx != area->nia.num_niovs))
+ return -EFAULT;
+ return 0;
+}
+
+static void io_release_dmabuf(struct io_zcrx_mem *mem)
+{
+ if (!IS_ENABLED(CONFIG_DMA_SHARED_BUFFER))
+ return;
+
+ if (mem->sgt)
+ dma_buf_unmap_attachment_unlocked(mem->attach, mem->sgt,
+ DMA_FROM_DEVICE);
+ if (mem->attach)
+ dma_buf_detach(mem->dmabuf, mem->attach);
+ if (mem->dmabuf)
+ dma_buf_put(mem->dmabuf);
+
+ mem->sgt = NULL;
+ mem->attach = NULL;
+ mem->dmabuf = NULL;
+}
+
+static int io_import_dmabuf(struct io_zcrx_ifq *ifq,
+ struct io_zcrx_mem *mem,
+ struct io_uring_zcrx_area_reg *area_reg)
+{
+ unsigned long off = (unsigned long)area_reg->addr;
+ unsigned long len = (unsigned long)area_reg->len;
+ unsigned long total_size = 0;
+ struct scatterlist *sg;
+ int dmabuf_fd = area_reg->dmabuf_fd;
+ int i, ret;
+
+ if (off)
+ return -EINVAL;
+ if (WARN_ON_ONCE(!ifq->dev))
+ return -EFAULT;
+ if (!IS_ENABLED(CONFIG_DMA_SHARED_BUFFER))
+ return -EINVAL;
+
+ mem->is_dmabuf = true;
+ mem->dmabuf = dma_buf_get(dmabuf_fd);
+ if (IS_ERR(mem->dmabuf)) {
+ ret = PTR_ERR(mem->dmabuf);
+ mem->dmabuf = NULL;
+ goto err;
+ }
+
+ mem->attach = dma_buf_attach(mem->dmabuf, ifq->dev);
+ if (IS_ERR(mem->attach)) {
+ ret = PTR_ERR(mem->attach);
+ mem->attach = NULL;
+ goto err;
+ }
+
+ mem->sgt = dma_buf_map_attachment_unlocked(mem->attach, DMA_FROM_DEVICE);
+ if (IS_ERR(mem->sgt)) {
+ ret = PTR_ERR(mem->sgt);
+ mem->sgt = NULL;
+ goto err;
+ }
+
+ for_each_sgtable_dma_sg(mem->sgt, sg, i)
+ total_size += sg_dma_len(sg);
+
+ if (total_size != len) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ mem->size = len;
+ return 0;
+err:
+ io_release_dmabuf(mem);
+ return ret;
+}
+
+static unsigned long io_count_account_pages(struct page **pages, unsigned nr_pages)
+{
+ struct folio *last_folio = NULL;
+ unsigned long res = 0;
+ int i;
+
+ for (i = 0; i < nr_pages; i++) {
+ struct folio *folio = page_folio(pages[i]);
+
+ if (folio == last_folio)
+ continue;
+ last_folio = folio;
+ res += folio_nr_pages(folio);
+ }
+ return res;
+}
+
+static int io_import_umem(struct io_zcrx_ifq *ifq,
+ struct io_zcrx_mem *mem,
+ struct io_uring_zcrx_area_reg *area_reg)
+{
+ struct page **pages;
+ int nr_pages, ret;
+
+ if (area_reg->dmabuf_fd)
+ return -EINVAL;
+ if (!area_reg->addr)
+ return -EFAULT;
+ pages = io_pin_pages((unsigned long)area_reg->addr, area_reg->len,
+ &nr_pages);
+ if (IS_ERR(pages))
+ return PTR_ERR(pages);
+
+ ret = sg_alloc_table_from_pages(&mem->page_sg_table, pages, nr_pages,
+ 0, nr_pages << PAGE_SHIFT,
+ GFP_KERNEL_ACCOUNT);
+ if (ret) {
+ unpin_user_pages(pages, nr_pages);
+ return ret;
+ }
+
+ mem->account_pages = io_count_account_pages(pages, nr_pages);
+ ret = io_account_mem(ifq->user, ifq->mm_account, mem->account_pages);
+ if (ret < 0)
+ mem->account_pages = 0;
+
+ mem->sgt = &mem->page_sg_table;
+ mem->pages = pages;
+ mem->nr_folios = nr_pages;
+ mem->size = area_reg->len;
+ return ret;
+}
+
+static void io_release_area_mem(struct io_zcrx_mem *mem)
+{
+ if (mem->is_dmabuf) {
+ io_release_dmabuf(mem);
+ return;
+ }
+ if (mem->pages) {
+ unpin_user_pages(mem->pages, mem->nr_folios);
+ sg_free_table(mem->sgt);
+ mem->sgt = NULL;
+ kvfree(mem->pages);
+ }
+}
+
+static int io_import_area(struct io_zcrx_ifq *ifq,
+ struct io_zcrx_mem *mem,
+ struct io_uring_zcrx_area_reg *area_reg)
+{
+ int ret;
+
+ if (area_reg->flags & ~IO_ZCRX_AREA_SUPPORTED_FLAGS)
+ return -EINVAL;
+ if (area_reg->rq_area_token)
+ return -EINVAL;
+ if (area_reg->__resv2[0] || area_reg->__resv2[1])
+ return -EINVAL;
+
+ ret = io_validate_user_buf_range(area_reg->addr, area_reg->len);
+ if (ret)
+ return ret;
+ if (area_reg->addr & ~PAGE_MASK || area_reg->len & ~PAGE_MASK)
+ return -EINVAL;
+
+ if (area_reg->flags & IORING_ZCRX_AREA_DMABUF)
+ return io_import_dmabuf(ifq, mem, area_reg);
+ return io_import_umem(ifq, mem, area_reg);
+}
+
+static void io_zcrx_unmap_area(struct io_zcrx_ifq *ifq,
+ struct io_zcrx_area *area)
+{
+ int i;
+
+ guard(mutex)(&ifq->pp_lock);
+ if (!area->is_mapped)
+ return;
+ area->is_mapped = false;
+
+ for (i = 0; i < area->nia.num_niovs; i++)
+ net_mp_niov_set_dma_addr(&area->nia.niovs[i], 0);
+
+ if (area->mem.is_dmabuf) {
+ io_release_dmabuf(&area->mem);
+ } else {
+ dma_unmap_sgtable(ifq->dev, &area->mem.page_sg_table,
+ DMA_FROM_DEVICE, IO_DMA_ATTR);
+ }
+}
+
+static int io_zcrx_map_area(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area)
+{
+ int ret;
+
+ guard(mutex)(&ifq->pp_lock);
+ if (area->is_mapped)
+ return 0;
+
+ if (!area->mem.is_dmabuf) {
+ ret = dma_map_sgtable(ifq->dev, &area->mem.page_sg_table,
+ DMA_FROM_DEVICE, IO_DMA_ATTR);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = io_populate_area_dma(ifq, area);
+ if (ret == 0)
+ area->is_mapped = true;
+ return ret;
+}
+
+static void io_zcrx_sync_for_device(struct page_pool *pool,
+ struct net_iov *niov)
+{
+#if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
+ dma_addr_t dma_addr;
+
+ unsigned niov_size;
+
+ if (!dma_dev_need_sync(pool->p.dev))
+ return;
+
+ niov_size = 1U << io_pp_to_ifq(pool)->niov_shift;
+ dma_addr = page_pool_get_dma_addr_netmem(net_iov_to_netmem(niov));
+ __dma_sync_single_for_device(pool->p.dev, dma_addr + pool->p.offset,
+ niov_size, pool->p.dma_dir);
+#endif
+}
+
+#define IO_RQ_MAX_ENTRIES 32768
+
+#define IO_SKBS_PER_CALL_LIMIT 20
+
+struct io_zcrx_args {
+ struct io_kiocb *req;
+ struct io_zcrx_ifq *ifq;
+ struct socket *sock;
+ unsigned nr_skbs;
+};
+
+static const struct memory_provider_ops io_uring_pp_zc_ops;
+
+static inline atomic_t *io_get_user_counter(struct net_iov *niov)
+{
+ struct io_zcrx_area *area = io_zcrx_iov_to_area(niov);
+
+ return &area->user_refs[net_iov_idx(niov)];
+}
+
+static bool io_zcrx_put_niov_uref(struct net_iov *niov)
+{
+ atomic_t *uref = io_get_user_counter(niov);
+
+ if (unlikely(!atomic_read(uref)))
+ return false;
+ atomic_dec(uref);
+ return true;
+}
+
+static void io_zcrx_get_niov_uref(struct net_iov *niov)
+{
+ atomic_inc(io_get_user_counter(niov));
+}
+
+static void io_fill_zcrx_offsets(struct io_uring_zcrx_offsets *offsets)
+{
+ offsets->head = offsetof(struct io_uring, head);
+ offsets->tail = offsetof(struct io_uring, tail);
+ offsets->rqes = ALIGN(sizeof(struct io_uring), L1_CACHE_BYTES);
+}
+
+static int io_allocate_rbuf_ring(struct io_ring_ctx *ctx,
+ struct io_zcrx_ifq *ifq,
+ struct io_uring_zcrx_ifq_reg *reg,
+ struct io_uring_region_desc *rd,
+ u32 id)
+{
+ u64 mmap_offset;
+ size_t off, size;
+ void *ptr;
+ int ret;
+
+ io_fill_zcrx_offsets(&reg->offsets);
+ off = reg->offsets.rqes;
+ size = off + sizeof(struct io_uring_zcrx_rqe) * reg->rq_entries;
+ if (size > rd->size)
+ return -EINVAL;
+
+ mmap_offset = IORING_MAP_OFF_ZCRX_REGION;
+ mmap_offset += id << IORING_OFF_PBUF_SHIFT;
+
+ ret = io_create_region(ctx, &ifq->region, rd, mmap_offset);
+ if (ret < 0)
+ return ret;
+
+ ptr = io_region_get_ptr(&ifq->region);
+ ifq->rq_ring = (struct io_uring *)ptr;
+ ifq->rqes = (struct io_uring_zcrx_rqe *)(ptr + off);
+
+ return 0;
+}
+
+static void io_free_rbuf_ring(struct io_zcrx_ifq *ifq)
+{
+ io_free_region(ifq->user, &ifq->region);
+ ifq->rq_ring = NULL;
+ ifq->rqes = NULL;
+}
+
+static void io_zcrx_free_area(struct io_zcrx_ifq *ifq,
+ struct io_zcrx_area *area)
+{
+ io_zcrx_unmap_area(ifq, area);
+ io_release_area_mem(&area->mem);
+
+ if (area->mem.account_pages)
+ io_unaccount_mem(ifq->user, ifq->mm_account,
+ area->mem.account_pages);
+
+ kvfree(area->freelist);
+ kvfree(area->nia.niovs);
+ kvfree(area->user_refs);
+ kfree(area);
+}
+
+static int io_zcrx_append_area(struct io_zcrx_ifq *ifq,
+ struct io_zcrx_area *area)
+{
+ if (ifq->area)
+ return -EINVAL;
+ ifq->area = area;
+ return 0;
+}
+
+static int io_zcrx_create_area(struct io_zcrx_ifq *ifq,
+ struct io_uring_zcrx_area_reg *area_reg)
+{
+ struct io_zcrx_area *area;
+ unsigned nr_iovs;
+ int i, ret;
+
+ ret = -ENOMEM;
+ area = kzalloc(sizeof(*area), GFP_KERNEL);
+ if (!area)
+ goto err;
+ area->ifq = ifq;
+
+ ret = io_import_area(ifq, &area->mem, area_reg);
+ if (ret)
+ goto err;
+
+ ifq->niov_shift = PAGE_SHIFT;
+ nr_iovs = area->mem.size >> ifq->niov_shift;
+ area->nia.num_niovs = nr_iovs;
+
+ ret = -ENOMEM;
+ area->nia.niovs = kvmalloc_array(nr_iovs, sizeof(area->nia.niovs[0]),
+ GFP_KERNEL_ACCOUNT | __GFP_ZERO);
+ if (!area->nia.niovs)
+ goto err;
+
+ area->freelist = kvmalloc_array(nr_iovs, sizeof(area->freelist[0]),
+ GFP_KERNEL_ACCOUNT | __GFP_ZERO);
+ if (!area->freelist)
+ goto err;
+
+ area->user_refs = kvmalloc_array(nr_iovs, sizeof(area->user_refs[0]),
+ GFP_KERNEL_ACCOUNT | __GFP_ZERO);
+ if (!area->user_refs)
+ goto err;
+
+ for (i = 0; i < nr_iovs; i++) {
+ struct net_iov *niov = &area->nia.niovs[i];
+
+ niov->owner = &area->nia;
+ area->freelist[i] = i;
+ atomic_set(&area->user_refs[i], 0);
+ niov->type = NET_IOV_IOURING;
+ }
+
+ area->free_count = nr_iovs;
+ /* we're only supporting one area per ifq for now */
+ area->area_id = 0;
+ area_reg->rq_area_token = (u64)area->area_id << IORING_ZCRX_AREA_SHIFT;
+ spin_lock_init(&area->freelist_lock);
+
+ ret = io_zcrx_append_area(ifq, area);
+ if (!ret)
+ return 0;
+err:
+ if (area)
+ io_zcrx_free_area(ifq, area);
+ return ret;
+}
+
+static struct io_zcrx_ifq *io_zcrx_ifq_alloc(struct io_ring_ctx *ctx)
+{
+ struct io_zcrx_ifq *ifq;
+
+ ifq = kzalloc(sizeof(*ifq), GFP_KERNEL);
+ if (!ifq)
+ return NULL;
+
+ ifq->if_rxq = -1;
+ spin_lock_init(&ifq->rq_lock);
+ mutex_init(&ifq->pp_lock);
+ refcount_set(&ifq->refs, 1);
+ refcount_set(&ifq->user_refs, 1);
+ return ifq;
+}
+
+static void io_zcrx_drop_netdev(struct io_zcrx_ifq *ifq)
+{
+ guard(mutex)(&ifq->pp_lock);
+
+ if (!ifq->netdev)
+ return;
+ netdev_put(ifq->netdev, &ifq->netdev_tracker);
+ ifq->netdev = NULL;
+}
+
+static void io_close_queue(struct io_zcrx_ifq *ifq)
+{
+ struct net_device *netdev;
+ netdevice_tracker netdev_tracker;
+ struct pp_memory_provider_params p = {
+ .mp_ops = &io_uring_pp_zc_ops,
+ .mp_priv = ifq,
+ };
+
+ if (ifq->if_rxq == -1)
+ return;
+
+ scoped_guard(mutex, &ifq->pp_lock) {
+ netdev = ifq->netdev;
+ netdev_tracker = ifq->netdev_tracker;
+ ifq->netdev = NULL;
+ }
+
+ if (netdev) {
+ net_mp_close_rxq(netdev, ifq->if_rxq, &p);
+ netdev_put(netdev, &netdev_tracker);
+ }
+ ifq->if_rxq = -1;
+}
+
+static void io_zcrx_ifq_free(struct io_zcrx_ifq *ifq)
+{
+ io_close_queue(ifq);
+
+ if (ifq->area)
+ io_zcrx_free_area(ifq, ifq->area);
+ free_uid(ifq->user);
+ if (ifq->mm_account)
+ mmdrop(ifq->mm_account);
+ if (ifq->dev)
+ put_device(ifq->dev);
+
+ io_free_rbuf_ring(ifq);
+ mutex_destroy(&ifq->pp_lock);
+ kfree(ifq);
+}
+
+static void io_put_zcrx_ifq(struct io_zcrx_ifq *ifq)
+{
+ if (refcount_dec_and_test(&ifq->refs))
+ io_zcrx_ifq_free(ifq);
+}
+
+static void io_zcrx_return_niov_freelist(struct net_iov *niov)
+{
+ struct io_zcrx_area *area = io_zcrx_iov_to_area(niov);
+
+ spin_lock_bh(&area->freelist_lock);
+ area->freelist[area->free_count++] = net_iov_idx(niov);
+ spin_unlock_bh(&area->freelist_lock);
+}
+
+static void io_zcrx_return_niov(struct net_iov *niov)
+{
+ netmem_ref netmem = net_iov_to_netmem(niov);
+
+ if (!niov->desc.pp) {
+ /* copy fallback allocated niovs */
+ io_zcrx_return_niov_freelist(niov);
+ return;
+ }
+ page_pool_put_unrefed_netmem(niov->desc.pp, netmem, -1, false);
+}
+
+static void io_zcrx_scrub(struct io_zcrx_ifq *ifq)
+{
+ struct io_zcrx_area *area = ifq->area;
+ int i;
+
+ if (!area)
+ return;
+
+ /* Reclaim all buffers handed out to userspace. */
+ for (i = 0; i < area->nia.num_niovs; i++) {
+ struct net_iov *niov = &area->nia.niovs[i];
+ int nr;
+
+ if (!atomic_read(io_get_user_counter(niov)))
+ continue;
+ nr = atomic_xchg(io_get_user_counter(niov), 0);
+ if (nr && !page_pool_unref_netmem(net_iov_to_netmem(niov), nr))
+ io_zcrx_return_niov(niov);
+ }
+}
+
+static void zcrx_unregister(struct io_zcrx_ifq *ifq)
+{
+ if (refcount_dec_and_test(&ifq->user_refs)) {
+ io_close_queue(ifq);
+ io_zcrx_scrub(ifq);
+ }
+ io_put_zcrx_ifq(ifq);
+}
+
+struct io_mapped_region *io_zcrx_get_region(struct io_ring_ctx *ctx,
+ unsigned int id)
+{
+ struct io_zcrx_ifq *ifq = xa_load(&ctx->zcrx_ctxs, id);
+
+ lockdep_assert_held(&ctx->mmap_lock);
+
+ return ifq ? &ifq->region : NULL;
+}
+
+static int zcrx_box_release(struct inode *inode, struct file *file)
+{
+ struct io_zcrx_ifq *ifq = file->private_data;
+
+ if (WARN_ON_ONCE(!ifq))
+ return -EFAULT;
+ zcrx_unregister(ifq);
+ return 0;
+}
+
+static const struct file_operations zcrx_box_fops = {
+ .owner = THIS_MODULE,
+ .release = zcrx_box_release,
+};
+
+static int zcrx_export(struct io_ring_ctx *ctx, struct io_zcrx_ifq *ifq,
+ struct zcrx_ctrl *ctrl, void __user *arg)
+{
+ struct zcrx_ctrl_export *ce = &ctrl->zc_export;
+ struct file *file;
+ int fd = -1;
+
+ if (!mem_is_zero(ce, sizeof(*ce)))
+ return -EINVAL;
+ fd = get_unused_fd_flags(O_CLOEXEC);
+ if (fd < 0)
+ return fd;
+
+ ce->zcrx_fd = fd;
+ if (copy_to_user(arg, ctrl, sizeof(*ctrl))) {
+ put_unused_fd(fd);
+ return -EFAULT;
+ }
+
+ refcount_inc(&ifq->refs);
+ refcount_inc(&ifq->user_refs);
+
+ file = anon_inode_create_getfile("[zcrx]", &zcrx_box_fops,
+ ifq, O_CLOEXEC, NULL);
+ if (IS_ERR(file)) {
+ put_unused_fd(fd);
+ zcrx_unregister(ifq);
+ return PTR_ERR(file);
+ }
+
+ fd_install(fd, file);
+ return 0;
+}
+
+static int import_zcrx(struct io_ring_ctx *ctx,
+ struct io_uring_zcrx_ifq_reg __user *arg,
+ struct io_uring_zcrx_ifq_reg *reg)
+{
+ struct io_zcrx_ifq *ifq;
+ struct file *file;
+ int fd, ret;
+ u32 id;
+
+ if (!(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
+ return -EINVAL;
+ if (!(ctx->flags & (IORING_SETUP_CQE32|IORING_SETUP_CQE_MIXED)))
+ return -EINVAL;
+ if (reg->if_rxq || reg->rq_entries || reg->area_ptr || reg->region_ptr)
+ return -EINVAL;
+
+ fd = reg->if_idx;
+ CLASS(fd, f)(fd);
+ if (fd_empty(f))
+ return -EBADF;
+
+ file = fd_file(f);
+ if (file->f_op != &zcrx_box_fops || !file->private_data)
+ return -EBADF;
+
+ ifq = file->private_data;
+ refcount_inc(&ifq->refs);
+ refcount_inc(&ifq->user_refs);
+
+ scoped_guard(mutex, &ctx->mmap_lock) {
+ ret = xa_alloc(&ctx->zcrx_ctxs, &id, NULL, xa_limit_31b, GFP_KERNEL);
+ if (ret)
+ goto err;
+ }
+
+ reg->zcrx_id = id;
+ io_fill_zcrx_offsets(&reg->offsets);
+ if (copy_to_user(arg, reg, sizeof(*reg))) {
+ ret = -EFAULT;
+ goto err_xa_erase;
+ }
+
+ scoped_guard(mutex, &ctx->mmap_lock) {
+ ret = -ENOMEM;
+ if (xa_store(&ctx->zcrx_ctxs, id, ifq, GFP_KERNEL))
+ goto err_xa_erase;
+ }
+
+ return 0;
+err_xa_erase:
+ scoped_guard(mutex, &ctx->mmap_lock)
+ xa_erase(&ctx->zcrx_ctxs, id);
+err:
+ zcrx_unregister(ifq);
+ return ret;
+}
+
+int io_register_zcrx_ifq(struct io_ring_ctx *ctx,
+ struct io_uring_zcrx_ifq_reg __user *arg)
+{
+ struct pp_memory_provider_params mp_param = {};
+ struct io_uring_zcrx_area_reg area;
+ struct io_uring_zcrx_ifq_reg reg;
+ struct io_uring_region_desc rd;
+ struct io_zcrx_ifq *ifq;
+ int ret;
+ u32 id;
+
+ /*
+ * CAP_NET_ADMIN is required because this:
+ * 1. allocates an interface queue, and
+ * 2. can observe data destined for sockets of other tasks.
+ */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ /* mandatory io_uring features for zc rx */
+ if (!(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
+ return -EINVAL;
+ if (!(ctx->flags & (IORING_SETUP_CQE32|IORING_SETUP_CQE_MIXED)))
+ return -EINVAL;
+ if (copy_from_user(&reg, arg, sizeof(reg)))
+ return -EFAULT;
+ if (!mem_is_zero(&reg.__resv, sizeof(reg.__resv)) ||
+ reg.__resv2 || reg.zcrx_id)
+ return -EINVAL;
+ if (reg.flags & ZCRX_REG_IMPORT)
+ return import_zcrx(ctx, arg, &reg);
+ if (copy_from_user(&rd, u64_to_user_ptr(reg.region_ptr), sizeof(rd)))
+ return -EFAULT;
+ if (reg.if_rxq == -1 || !reg.rq_entries || reg.flags)
+ return -EINVAL;
+ if (reg.rq_entries > IO_RQ_MAX_ENTRIES) {
+ if (!(ctx->flags & IORING_SETUP_CLAMP))
+ return -EINVAL;
+ reg.rq_entries = IO_RQ_MAX_ENTRIES;
+ }
+ reg.rq_entries = roundup_pow_of_two(reg.rq_entries);
+
+ if (copy_from_user(&area, u64_to_user_ptr(reg.area_ptr), sizeof(area)))
+ return -EFAULT;
+
+ ifq = io_zcrx_ifq_alloc(ctx);
+ if (!ifq)
+ return -ENOMEM;
+
+ if (ctx->user) {
+ get_uid(ctx->user);
+ ifq->user = ctx->user;
+ }
+ if (ctx->mm_account) {
+ mmgrab(ctx->mm_account);
+ ifq->mm_account = ctx->mm_account;
+ }
+ ifq->rq_entries = reg.rq_entries;
+
+ scoped_guard(mutex, &ctx->mmap_lock) {
+ /* preallocate id */
+ ret = xa_alloc(&ctx->zcrx_ctxs, &id, NULL, xa_limit_31b, GFP_KERNEL);
+ if (ret)
+ goto ifq_free;
+ }
+
+ ret = io_allocate_rbuf_ring(ctx, ifq, &reg, &rd, id);
+ if (ret)
+ goto err;
+
+ ifq->netdev = netdev_get_by_index_lock(current->nsproxy->net_ns, reg.if_idx);
+ if (!ifq->netdev) {
+ ret = -ENODEV;
+ goto err;
+ }
+ netdev_hold(ifq->netdev, &ifq->netdev_tracker, GFP_KERNEL);
+
+ ifq->dev = netdev_queue_get_dma_dev(ifq->netdev, reg.if_rxq);
+ if (!ifq->dev) {
+ ret = -EOPNOTSUPP;
+ goto netdev_put_unlock;
+ }
+ get_device(ifq->dev);
+
+ ret = io_zcrx_create_area(ifq, &area);
+ if (ret)
+ goto netdev_put_unlock;
+
+ mp_param.mp_ops = &io_uring_pp_zc_ops;
+ mp_param.mp_priv = ifq;
+ ret = __net_mp_open_rxq(ifq->netdev, reg.if_rxq, &mp_param, NULL);
+ if (ret)
+ goto netdev_put_unlock;
+ netdev_unlock(ifq->netdev);
+ ifq->if_rxq = reg.if_rxq;
+
+ reg.zcrx_id = id;
+
+ scoped_guard(mutex, &ctx->mmap_lock) {
+ /* publish ifq */
+ ret = -ENOMEM;
+ if (xa_store(&ctx->zcrx_ctxs, id, ifq, GFP_KERNEL))
+ goto err;
+ }
+
+ if (copy_to_user(arg, &reg, sizeof(reg)) ||
+ copy_to_user(u64_to_user_ptr(reg.region_ptr), &rd, sizeof(rd)) ||
+ copy_to_user(u64_to_user_ptr(reg.area_ptr), &area, sizeof(area))) {
+ ret = -EFAULT;
+ goto err;
+ }
+ return 0;
+netdev_put_unlock:
+ netdev_put(ifq->netdev, &ifq->netdev_tracker);
+ netdev_unlock(ifq->netdev);
+err:
+ scoped_guard(mutex, &ctx->mmap_lock)
+ xa_erase(&ctx->zcrx_ctxs, id);
+ifq_free:
+ io_zcrx_ifq_free(ifq);
+ return ret;
+}
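For orientation, a hedged userspace sketch of registering a zcrx interface queue with the structures consumed above; the ring must have been created with IORING_SETUP_DEFER_TASKRUN and CQE32 (or CQE_MIXED), and the caller needs CAP_NET_ADMIN. IORING_REGISTER_ZCRX_IFQ, the io_uring_register(2) wrapper and the IORING_MEM_REGION_TYPE_USER region flag are uapi names assumed by this example rather than taken from this file.

	struct io_uring_zcrx_area_reg area = {
		.addr	= (__u64)(unsigned long)area_mem,	/* page-aligned, page-multiple length */
		.len	= area_len,
	};
	struct io_uring_region_desc rd = {
		.user_addr	= (__u64)(unsigned long)ring_mem,	/* backs the refill ring */
		.size		= ring_len,
		.flags		= IORING_MEM_REGION_TYPE_USER,		/* assumed flag name */
	};
	struct io_uring_zcrx_ifq_reg reg = {
		.if_idx		= ifindex,		/* netdev ifindex */
		.if_rxq		= rxq,			/* HW RX queue to attach to */
		.rq_entries	= 4096,
		.area_ptr	= (__u64)(unsigned long)&area,
		.region_ptr	= (__u64)(unsigned long)&rd,
	};

	ret = io_uring_register(ring_fd, IORING_REGISTER_ZCRX_IFQ, &reg, 1);
	/* on success, reg.zcrx_id, reg.offsets and area.rq_area_token are filled in */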
+
+static struct net_iov *__io_zcrx_get_free_niov(struct io_zcrx_area *area)
+{
+ unsigned niov_idx;
+
+ lockdep_assert_held(&area->freelist_lock);
+
+ niov_idx = area->freelist[--area->free_count];
+ return &area->nia.niovs[niov_idx];
+}
+
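+/*
+ * Called on ring teardown: pull each registered ifq out of the xarray under
+ * mmap_lock and unregister it.
+ */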
+void io_unregister_zcrx_ifqs(struct io_ring_ctx *ctx)
+{
+ struct io_zcrx_ifq *ifq;
+
+ lockdep_assert_held(&ctx->uring_lock);
+
+ while (1) {
+ scoped_guard(mutex, &ctx->mmap_lock) {
+ unsigned long id = 0;
+
+ ifq = xa_find(&ctx->zcrx_ctxs, &id, ULONG_MAX, XA_PRESENT);
+ if (ifq)
+ xa_erase(&ctx->zcrx_ctxs, id);
+ }
+ if (!ifq)
+ break;
+ zcrx_unregister(ifq);
+ }
+
+ xa_destroy(&ctx->zcrx_ctxs);
+}
+
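+/*
+ * Number of refill ring entries the kernel may consume; the acquire on the
+ * tail pairs with userspace publishing new entries.
+ */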
+static inline u32 io_zcrx_rqring_entries(struct io_zcrx_ifq *ifq)
+{
+ u32 entries;
+
+ entries = smp_load_acquire(&ifq->rq_ring->tail) - ifq->cached_rq_head;
+ return min(entries, ifq->rq_entries);
+}
+
+static struct io_uring_zcrx_rqe *io_zcrx_get_rqe(struct io_zcrx_ifq *ifq,
+ unsigned mask)
+{
+ unsigned int idx = ifq->cached_rq_head++ & mask;
+
+ return &ifq->rqes[idx];
+}
+
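+/*
+ * Validate a refill ring entry and translate its offset into a niov. Only a
+ * single area (index 0) is supported; the niov index is bounds checked and
+ * hardened against speculation.
+ */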
+static inline bool io_parse_rqe(struct io_uring_zcrx_rqe *rqe,
+ struct io_zcrx_ifq *ifq,
+ struct net_iov **ret_niov)
+{
+ unsigned niov_idx, area_idx;
+ struct io_zcrx_area *area;
+
+ area_idx = rqe->off >> IORING_ZCRX_AREA_SHIFT;
+ niov_idx = (rqe->off & ~IORING_ZCRX_AREA_MASK) >> ifq->niov_shift;
+
+ if (unlikely(rqe->__pad || area_idx))
+ return false;
+ area = ifq->area;
+
+ if (unlikely(niov_idx >= area->nia.num_niovs))
+ return false;
+ niov_idx = array_index_nospec(niov_idx, area->nia.num_niovs);
+
+ *ret_niov = &area->nia.niovs[niov_idx];
+ return true;
+}
+
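+/*
+ * Refill the page pool's alloc cache from the refill ring. A buffer is only
+ * recycled once both its user reference and its page pool reference have
+ * been dropped; buffers belonging to a different page pool are returned
+ * through the generic path instead.
+ */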
+static void io_zcrx_ring_refill(struct page_pool *pp,
+ struct io_zcrx_ifq *ifq)
+{
+ unsigned int mask = ifq->rq_entries - 1;
+ unsigned int entries;
+
+ guard(spinlock_bh)(&ifq->rq_lock);
+
+ entries = io_zcrx_rqring_entries(ifq);
+ entries = min_t(unsigned, entries, PP_ALLOC_CACHE_REFILL);
+ if (unlikely(!entries))
+ return;
+
+ do {
+ struct io_uring_zcrx_rqe *rqe = io_zcrx_get_rqe(ifq, mask);
+ struct net_iov *niov;
+ netmem_ref netmem;
+
+ if (!io_parse_rqe(rqe, ifq, &niov))
+ continue;
+ if (!io_zcrx_put_niov_uref(niov))
+ continue;
+
+ netmem = net_iov_to_netmem(niov);
+ if (!page_pool_unref_and_test(netmem))
+ continue;
+
+ if (unlikely(niov->desc.pp != pp)) {
+ io_zcrx_return_niov(niov);
+ continue;
+ }
+
+ io_zcrx_sync_for_device(pp, niov);
+ net_mp_netmem_place_in_cache(pp, netmem);
+ } while (--entries);
+
+ smp_store_release(&ifq->rq_ring->head, ifq->cached_rq_head);
+}
+
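+/*
+ * Slow refill path: when the refill ring is empty, hand out buffers straight
+ * from the area freelist.
+ */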
+static void io_zcrx_refill_slow(struct page_pool *pp, struct io_zcrx_ifq *ifq)
+{
+ struct io_zcrx_area *area = ifq->area;
+
+ spin_lock_bh(&area->freelist_lock);
+ while (area->free_count && pp->alloc.count < PP_ALLOC_CACHE_REFILL) {
+ struct net_iov *niov = __io_zcrx_get_free_niov(area);
+ netmem_ref netmem = net_iov_to_netmem(niov);
+
+ net_mp_niov_set_page_pool(pp, niov);
+ io_zcrx_sync_for_device(pp, niov);
+ net_mp_netmem_place_in_cache(pp, netmem);
+ }
+ spin_unlock_bh(&area->freelist_lock);
+}
+
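+/*
+ * Memory provider allocation callback: refill from the refill ring first,
+ * fall back to the freelist, then hand out one netmem from the alloc cache.
+ */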
+static netmem_ref io_pp_zc_alloc_netmems(struct page_pool *pp, gfp_t gfp)
+{
+ struct io_zcrx_ifq *ifq = io_pp_to_ifq(pp);
+
+ /* pp should already be ensuring that */
+ if (unlikely(pp->alloc.count))
+ goto out_return;
+
+ io_zcrx_ring_refill(pp, ifq);
+ if (likely(pp->alloc.count))
+ goto out_return;
+
+ io_zcrx_refill_slow(pp, ifq);
+ if (!pp->alloc.count)
+ return 0;
+out_return:
+ return pp->alloc.cache[--pp->alloc.count];
+}
+
+static bool io_pp_zc_release_netmem(struct page_pool *pp, netmem_ref netmem)
+{
+ struct net_iov *niov;
+
+ if (WARN_ON_ONCE(!netmem_is_net_iov(netmem)))
+ return false;
+
+ niov = netmem_to_net_iov(netmem);
+ net_mp_niov_clear_page_pool(niov);
+ io_zcrx_return_niov_freelist(niov);
+ return false;
+}
+
+static int io_pp_zc_init(struct page_pool *pp)
+{
+ struct io_zcrx_ifq *ifq = io_pp_to_ifq(pp);
+ int ret;
+
+ if (WARN_ON_ONCE(!ifq))
+ return -EINVAL;
+ if (WARN_ON_ONCE(ifq->dev != pp->p.dev))
+ return -EINVAL;
+ if (WARN_ON_ONCE(!pp->dma_map))
+ return -EOPNOTSUPP;
+ if (pp->p.order + PAGE_SHIFT != ifq->niov_shift)
+ return -EINVAL;
+ if (pp->p.dma_dir != DMA_FROM_DEVICE)
+ return -EOPNOTSUPP;
+
+ ret = io_zcrx_map_area(ifq, ifq->area);
+ if (ret)
+ return ret;
+
+ refcount_inc(&ifq->refs);
+ return 0;
+}
+
+static void io_pp_zc_destroy(struct page_pool *pp)
+{
+ io_put_zcrx_ifq(io_pp_to_ifq(pp));
+}
+
+static int io_pp_nl_fill(void *mp_priv, struct sk_buff *rsp,
+ struct netdev_rx_queue *rxq)
+{
+ struct nlattr *nest;
+ int type;
+
+ type = rxq ? NETDEV_A_QUEUE_IO_URING : NETDEV_A_PAGE_POOL_IO_URING;
+ nest = nla_nest_start(rsp, type);
+ if (!nest)
+ return -EMSGSIZE;
+ nla_nest_end(rsp, nest);
+
+ return 0;
+}
+
+static void io_pp_uninstall(void *mp_priv, struct netdev_rx_queue *rxq)
+{
+ struct pp_memory_provider_params *p = &rxq->mp_params;
+ struct io_zcrx_ifq *ifq = mp_priv;
+
+ io_zcrx_drop_netdev(ifq);
+ if (ifq->area)
+ io_zcrx_unmap_area(ifq, ifq->area);
+
+ p->mp_ops = NULL;
+ p->mp_priv = NULL;
+}
+
+static const struct memory_provider_ops io_uring_pp_zc_ops = {
+ .alloc_netmems = io_pp_zc_alloc_netmems,
+ .release_netmem = io_pp_zc_release_netmem,
+ .init = io_pp_zc_init,
+ .destroy = io_pp_zc_destroy,
+ .nl_fill = io_pp_nl_fill,
+ .uninstall = io_pp_uninstall,
+};
+
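+/*
+ * Consume up to @nr refill ring entries and convert them to netmems,
+ * stopping at the first invalid entry. Used by the flush path below.
+ */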
+static unsigned zcrx_parse_rq(netmem_ref *netmem_array, unsigned nr,
+ struct io_zcrx_ifq *zcrx)
+{
+ unsigned int mask = zcrx->rq_entries - 1;
+ unsigned int i;
+
+ guard(spinlock_bh)(&zcrx->rq_lock);
+
+ nr = min(nr, io_zcrx_rqring_entries(zcrx));
+ for (i = 0; i < nr; i++) {
+ struct io_uring_zcrx_rqe *rqe = io_zcrx_get_rqe(zcrx, mask);
+ struct net_iov *niov;
+
+ if (!io_parse_rqe(rqe, zcrx, &niov))
+ break;
+ netmem_array[i] = net_iov_to_netmem(niov);
+ }
+
+ smp_store_release(&zcrx->rq_ring->head, zcrx->cached_rq_head);
+ return i;
+}
+
+#define ZCRX_FLUSH_BATCH 32
+
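+/* drop user and page pool refs, returning buffers that are fully released */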
+static void zcrx_return_buffers(netmem_ref *netmems, unsigned nr)
+{
+ unsigned i;
+
+ for (i = 0; i < nr; i++) {
+ netmem_ref netmem = netmems[i];
+ struct net_iov *niov = netmem_to_net_iov(netmem);
+
+ if (!io_zcrx_put_niov_uref(niov))
+ continue;
+ if (!page_pool_unref_and_test(netmem))
+ continue;
+ io_zcrx_return_niov(niov);
+ }
+}
+
+static int zcrx_flush_rq(struct io_ring_ctx *ctx, struct io_zcrx_ifq *zcrx,
+ struct zcrx_ctrl *ctrl)
+{
+ struct zcrx_ctrl_flush_rq *frq = &ctrl->zc_flush;
+ netmem_ref netmems[ZCRX_FLUSH_BATCH];
+ unsigned total = 0;
+ unsigned nr;
+
+ if (!mem_is_zero(&frq->__resv, sizeof(frq->__resv)))
+ return -EINVAL;
+
+ do {
+ nr = zcrx_parse_rq(netmems, ZCRX_FLUSH_BATCH, zcrx);
+
+ zcrx_return_buffers(netmems, nr);
+ total += nr;
+
+ if (fatal_signal_pending(current))
+ break;
+ cond_resched();
+ } while (nr == ZCRX_FLUSH_BATCH && total < zcrx->rq_entries);
+
+ return 0;
+}
+
+int io_zcrx_ctrl(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
+{
+ struct zcrx_ctrl ctrl;
+ struct io_zcrx_ifq *zcrx;
+
+ if (nr_args)
+ return -EINVAL;
+ if (copy_from_user(&ctrl, arg, sizeof(ctrl)))
+ return -EFAULT;
+ if (!mem_is_zero(&ctrl.__resv, sizeof(ctrl.__resv)))
+ return -EINVAL;
+
+ zcrx = xa_load(&ctx->zcrx_ctxs, ctrl.zcrx_id);
+ if (!zcrx)
+ return -ENXIO;
+
+ switch (ctrl.op) {
+ case ZCRX_CTRL_FLUSH_RQ:
+ return zcrx_flush_rq(ctx, zcrx, &ctrl);
+ case ZCRX_CTRL_EXPORT:
+ return zcrx_export(ctx, zcrx, &ctrl, arg);
+ }
+
+ return -EOPNOTSUPP;
+}
+
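+/*
+ * Post a zero-copy completion: a normal CQE carrying the request's user_data
+ * and length, followed by an io_uring_zcrx_cqe encoding the area id and the
+ * offset of the buffer within the area.
+ */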
+static bool io_zcrx_queue_cqe(struct io_kiocb *req, struct net_iov *niov,
+ struct io_zcrx_ifq *ifq, int off, int len)
+{
+ struct io_ring_ctx *ctx = req->ctx;
+ struct io_uring_zcrx_cqe *rcqe;
+ struct io_zcrx_area *area;
+ struct io_uring_cqe *cqe;
+ u64 offset;
+
+ if (!io_defer_get_uncommited_cqe(ctx, &cqe))
+ return false;
+
+ cqe->user_data = req->cqe.user_data;
+ cqe->res = len;
+ cqe->flags = IORING_CQE_F_MORE;
+ if (ctx->flags & IORING_SETUP_CQE_MIXED)
+ cqe->flags |= IORING_CQE_F_32;
+
+ area = io_zcrx_iov_to_area(niov);
+ offset = off + (net_iov_idx(niov) << ifq->niov_shift);
+ rcqe = (struct io_uring_zcrx_cqe *)(cqe + 1);
+ rcqe->off = offset + ((u64)area->area_id << IORING_ZCRX_AREA_SHIFT);
+ rcqe->__pad = 0;
+ return true;
+}
+
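+/*
+ * Grab a buffer from the freelist for the copy fallback. dmabuf backed areas
+ * can't be copied into by the CPU, so they don't take this path.
+ */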
+static struct net_iov *io_alloc_fallback_niov(struct io_zcrx_ifq *ifq)
+{
+ struct io_zcrx_area *area = ifq->area;
+ struct net_iov *niov = NULL;
+
+ if (area->mem.is_dmabuf)
+ return NULL;
+
+ spin_lock_bh(&area->freelist_lock);
+ if (area->free_count)
+ niov = __io_zcrx_get_free_niov(area);
+ spin_unlock_bh(&area->freelist_lock);
+
+ if (niov)
+ page_pool_fragment_netmem(net_iov_to_netmem(niov), 1);
+ return niov;
+}
+
+struct io_copy_cache {
+ struct page *page;
+ unsigned long offset;
+ size_t size;
+};
+
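+/*
+ * Copy into the cached destination page, splitting the copy per page when
+ * either folio can only be partially kmapped (e.g. highmem).
+ */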
+static ssize_t io_copy_page(struct io_copy_cache *cc, struct page *src_page,
+ unsigned int src_offset, size_t len)
+{
+ size_t copied = 0;
+
+ len = min(len, cc->size);
+
+ while (len) {
+ void *src_addr, *dst_addr;
+ struct page *dst_page = cc->page;
+ unsigned dst_offset = cc->offset;
+ size_t n = len;
+
+ if (folio_test_partial_kmap(page_folio(dst_page)) ||
+ folio_test_partial_kmap(page_folio(src_page))) {
+ dst_page += dst_offset / PAGE_SIZE;
+ dst_offset = offset_in_page(dst_offset);
+ src_page += src_offset / PAGE_SIZE;
+ src_offset = offset_in_page(src_offset);
+ n = min(PAGE_SIZE - src_offset, PAGE_SIZE - dst_offset);
+ n = min(n, len);
+ }
+
+ dst_addr = kmap_local_page(dst_page) + dst_offset;
+ src_addr = kmap_local_page(src_page) + src_offset;
+
+ memcpy(dst_addr, src_addr, n);
+
+ kunmap_local(src_addr);
+ kunmap_local(dst_addr);
+
+ cc->size -= n;
+ cc->offset += n;
+ src_offset += n;
+ len -= n;
+ copied += n;
+ }
+ return copied;
+}
+
+static ssize_t io_zcrx_copy_chunk(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
+ struct page *src_page, unsigned int src_offset,
+ size_t len)
+{
+ size_t copied = 0;
+ int ret = 0;
+
+ while (len) {
+ struct io_copy_cache cc;
+ struct net_iov *niov;
+ size_t n;
+
+ niov = io_alloc_fallback_niov(ifq);
+ if (!niov) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ cc.page = io_zcrx_iov_page(niov);
+ cc.offset = 0;
+ cc.size = PAGE_SIZE;
+
+ n = io_copy_page(&cc, src_page, src_offset, len);
+
+ if (!io_zcrx_queue_cqe(req, niov, ifq, 0, n)) {
+ io_zcrx_return_niov(niov);
+ ret = -ENOSPC;
+ break;
+ }
+
+ io_zcrx_get_niov_uref(niov);
+ src_offset += n;
+ len -= n;
+ copied += n;
+ }
+
+ return copied ? copied : ret;
+}
+
+static int io_zcrx_copy_frag(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
+ const skb_frag_t *frag, int off, int len)
+{
+ struct page *page = skb_frag_page(frag);
+
+ return io_zcrx_copy_chunk(req, ifq, page, off + skb_frag_off(frag), len);
+}
+
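+/*
+ * Zero-copy receive of a frag backed by this ifq's page pool; frags from
+ * anywhere else are bounced through the copy fallback.
+ */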
+static int io_zcrx_recv_frag(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
+ const skb_frag_t *frag, int off, int len)
+{
+ struct net_iov *niov;
+ struct page_pool *pp;
+
+ if (unlikely(!skb_frag_is_net_iov(frag)))
+ return io_zcrx_copy_frag(req, ifq, frag, off, len);
+
+ niov = netmem_to_net_iov(frag->netmem);
+ pp = niov->desc.pp;
+
+ if (!pp || pp->mp_ops != &io_uring_pp_zc_ops || io_pp_to_ifq(pp) != ifq)
+ return -EFAULT;
+
+ if (!io_zcrx_queue_cqe(req, niov, ifq, off + skb_frag_off(frag), len))
+ return -ENOSPC;
+
+ /*
+ * Prevent it from being recycled while user is accessing it.
+ * It has to be done before grabbing a user reference.
+ */
+ page_pool_ref_netmem(net_iov_to_netmem(niov));
+ io_zcrx_get_niov_uref(niov);
+ return len;
+}
+
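+/*
+ * tcp_read_sock() callback: walk the skb's linear data, frags and frag list,
+ * posting one CQE per contiguous chunk handed to userspace.
+ */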
+static int
+io_zcrx_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
+ unsigned int offset, size_t len)
+{
+ struct io_zcrx_args *args = desc->arg.data;
+ struct io_zcrx_ifq *ifq = args->ifq;
+ struct io_kiocb *req = args->req;
+ struct sk_buff *frag_iter;
+ unsigned start, start_off = offset;
+ int i, copy, end, off;
+ int ret = 0;
+
+ len = min_t(size_t, len, desc->count);
+ /*
+ * __tcp_read_sock() always calls io_zcrx_recv_skb one last time, even
+ * if desc->count is already 0. This is caused by the if (offset + 1 !=
+ * skb->len) check. Return early in this case to break out of
+ * __tcp_read_sock().
+ */
+ if (!len)
+ return 0;
+ if (unlikely(args->nr_skbs++ > IO_SKBS_PER_CALL_LIMIT))
+ return -EAGAIN;
+
+ if (unlikely(offset < skb_headlen(skb))) {
+ ssize_t copied;
+ size_t to_copy;
+
+ to_copy = min_t(size_t, skb_headlen(skb) - offset, len);
+ copied = io_zcrx_copy_chunk(req, ifq, virt_to_page(skb->data),
+ offset_in_page(skb->data) + offset,
+ to_copy);
+ if (copied < 0) {
+ ret = copied;
+ goto out;
+ }
+ offset += copied;
+ len -= copied;
+ if (!len)
+ goto out;
+ if (offset != skb_headlen(skb))
+ goto out;
+ }
+
+ start = skb_headlen(skb);
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ const skb_frag_t *frag;
+
+ if (WARN_ON(start > offset + len))
+ return -EFAULT;
+
+ frag = &skb_shinfo(skb)->frags[i];
+ end = start + skb_frag_size(frag);
+
+ if (offset < end) {
+ copy = end - offset;
+ if (copy > len)
+ copy = len;
+
+ off = offset - start;
+ ret = io_zcrx_recv_frag(req, ifq, frag, off, copy);
+ if (ret < 0)
+ goto out;
+
+ offset += ret;
+ len -= ret;
+ if (len == 0 || ret != copy)
+ goto out;
+ }
+ start = end;
+ }
+
+ skb_walk_frags(skb, frag_iter) {
+ if (WARN_ON(start > offset + len))
+ return -EFAULT;
+
+ end = start + frag_iter->len;
+ if (offset < end) {
+ size_t count;
+
+ copy = end - offset;
+ if (copy > len)
+ copy = len;
+
+ off = offset - start;
+ count = desc->count;
+ ret = io_zcrx_recv_skb(desc, frag_iter, off, copy);
+ desc->count = count;
+ if (ret < 0)
+ goto out;
+
+ offset += ret;
+ len -= ret;
+ if (len == 0 || ret != copy)
+ goto out;
+ }
+ start = end;
+ }
+
+out:
+ if (offset == start_off)
+ return ret;
+ desc->count -= (offset - start_off);
+ return offset - start_off;
+}
+
+static int io_zcrx_tcp_recvmsg(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
+ struct sock *sk, int flags,
+ unsigned issue_flags, unsigned int *outlen)
+{
+ unsigned int len = *outlen;
+ struct io_zcrx_args args = {
+ .req = req,
+ .ifq = ifq,
+ .sock = sk->sk_socket,
+ };
+ read_descriptor_t rd_desc = {
+ .count = len ? len : UINT_MAX,
+ .arg.data = &args,
+ };
+ int ret;
+
+ lock_sock(sk);
+ ret = tcp_read_sock(sk, &rd_desc, io_zcrx_recv_skb);
+ if (len && ret > 0)
+ *outlen = len - ret;
+ if (ret <= 0) {
+ if (ret < 0 || sock_flag(sk, SOCK_DONE))
+ goto out;
+ if (sk->sk_err)
+ ret = sock_error(sk);
+ else if (sk->sk_shutdown & RCV_SHUTDOWN)
+ goto out;
+ else if (sk->sk_state == TCP_CLOSE)
+ ret = -ENOTCONN;
+ else
+ ret = -EAGAIN;
+ } else if (unlikely(args.nr_skbs > IO_SKBS_PER_CALL_LIMIT) &&
+ (issue_flags & IO_URING_F_MULTISHOT)) {
+ ret = IOU_REQUEUE;
+ } else if (sock_flag(sk, SOCK_DONE)) {
+ /* Make it retry until it finally gets 0. */
+ if (issue_flags & IO_URING_F_MULTISHOT)
+ ret = IOU_REQUEUE;
+ else
+ ret = -EAGAIN;
+ }
+out:
+ release_sock(sk);
+ return ret;
+}
+
+int io_zcrx_recv(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
+ struct socket *sock, unsigned int flags,
+ unsigned issue_flags, unsigned int *len)
+{
+ struct sock *sk = sock->sk;
+ const struct proto *prot = READ_ONCE(sk->sk_prot);
+
+ if (prot->recvmsg != tcp_recvmsg)
+ return -EPROTONOSUPPORT;
+
+ sock_rps_record_flow(sk);
+ return io_zcrx_tcp_recvmsg(req, ifq, sk, flags, issue_flags, len);
+}
diff --git a/io_uring/zcrx.h b/io_uring/zcrx.h
new file mode 100644
index 000000000000..32ab95b2cb81
--- /dev/null
+++ b/io_uring/zcrx.h
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef IOU_ZC_RX_H
+#define IOU_ZC_RX_H
+
+#include <linux/io_uring_types.h>
+#include <linux/dma-buf.h>
+#include <linux/socket.h>
+#include <net/page_pool/types.h>
+#include <net/net_trackers.h>
+
+struct io_zcrx_mem {
+ unsigned long size;
+ bool is_dmabuf;
+
+ struct page **pages;
+ unsigned long nr_folios;
+ struct sg_table page_sg_table;
+ unsigned long account_pages;
+ struct sg_table *sgt;
+
+ struct dma_buf_attachment *attach;
+ struct dma_buf *dmabuf;
+};
+
+struct io_zcrx_area {
+ struct net_iov_area nia;
+ struct io_zcrx_ifq *ifq;
+ atomic_t *user_refs;
+
+ bool is_mapped;
+ u16 area_id;
+
+ /* freelist */
+ spinlock_t freelist_lock ____cacheline_aligned_in_smp;
+ u32 free_count;
+ u32 *freelist;
+
+ struct io_zcrx_mem mem;
+};
+
+struct io_zcrx_ifq {
+ struct io_zcrx_area *area;
+ unsigned niov_shift;
+ struct user_struct *user;
+ struct mm_struct *mm_account;
+
+ spinlock_t rq_lock ____cacheline_aligned_in_smp;
+ struct io_uring *rq_ring;
+ struct io_uring_zcrx_rqe *rqes;
+ u32 cached_rq_head;
+ u32 rq_entries;
+
+ u32 if_rxq;
+ struct device *dev;
+ struct net_device *netdev;
+ netdevice_tracker netdev_tracker;
+ refcount_t refs;
+ /* counts userspace facing users like io_uring */
+ refcount_t user_refs;
+
+ /*
+ * Page pool and net configuration lock, can be taken deeper in the
+ * net stack.
+ */
+ struct mutex pp_lock;
+ struct io_mapped_region region;
+};
+
+#if defined(CONFIG_IO_URING_ZCRX)
+int io_zcrx_ctrl(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args);
+int io_register_zcrx_ifq(struct io_ring_ctx *ctx,
+ struct io_uring_zcrx_ifq_reg __user *arg);
+void io_unregister_zcrx_ifqs(struct io_ring_ctx *ctx);
+int io_zcrx_recv(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
+ struct socket *sock, unsigned int flags,
+ unsigned issue_flags, unsigned int *len);
+struct io_mapped_region *io_zcrx_get_region(struct io_ring_ctx *ctx,
+ unsigned int id);
+#else
+static inline int io_register_zcrx_ifq(struct io_ring_ctx *ctx,
+ struct io_uring_zcrx_ifq_reg __user *arg)
+{
+ return -EOPNOTSUPP;
+}
+static inline void io_unregister_zcrx_ifqs(struct io_ring_ctx *ctx)
+{
+}
+static inline int io_zcrx_recv(struct io_kiocb *req, struct io_zcrx_ifq *ifq,
+ struct socket *sock, unsigned int flags,
+ unsigned issue_flags, unsigned int *len)
+{
+ return -EOPNOTSUPP;
+}
+static inline struct io_mapped_region *io_zcrx_get_region(struct io_ring_ctx *ctx,
+ unsigned int id)
+{
+ return NULL;
+}
+static inline int io_zcrx_ctrl(struct io_ring_ctx *ctx,
+ void __user *arg, unsigned nr_args)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+int io_recvzc(struct io_kiocb *req, unsigned int issue_flags);
+int io_recvzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
+
+#endif