Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--    fs/io_uring.c    707
1 file changed, 259 insertions, 448 deletions
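
The recurring change in the hunks below is that a request no longer pins its struct file at prep time: io_init_req() only records the SQE's descriptor in req->fd, and io_assign_file() resolves it when the request is actually issued (the splice/tee paths get the same treatment for splice_fd_in). As a rough orientation, here is a minimal userspace C sketch of that deferred-lookup shape; every name in it (prep_request, assign_file, and so on) is an illustrative stand-in, not a kernel API.

#include <fcntl.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct request {
    union {              /* same storage, different lifetime phases */
        int fd;          /* recorded at prep time */
        unsigned cflags; /* reused for completion flags later on */
    };
    int file;            /* "resolved" descriptor, -1 until issue time */
};

static void prep_request(struct request *req, int fd)
{
    req->file = -1;
    req->fd = fd;        /* cheap: record the number, look nothing up */
}

static bool assign_file(struct request *req)
{
    if (req->file >= 0)
        return true;
    req->file = dup(req->fd);   /* resolve (and pin) only when needed */
    return req->file >= 0;
}

static long issue_request(struct request *req)
{
    char buf[64];

    if (!assign_file(req))
        return -1;              /* the kernel path fails with -EBADF here */
    return read(req->file, buf, sizeof(buf));
}

int main(void)
{
    struct request req;
    int fd = open("/dev/null", O_RDONLY);

    prep_request(&req, fd);
    printf("read returned %ld\n", issue_request(&req));
    if (req.file >= 0)
        close(req.file);
    close(fd);
    return 0;
}
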
diff --git a/fs/io_uring.c b/fs/io_uring.c
index a8413f006417..e0823f58f795 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -63,7 +63,6 @@
#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
-#include <net/busy_poll.h>
#include <linux/anon_inodes.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
@@ -112,8 +111,7 @@
IOSQE_IO_DRAIN | IOSQE_CQE_SKIP_SUCCESS)
#define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \
- REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS | \
- REQ_F_ASYNC_DATA)
+ REQ_F_POLLED | REQ_F_CREDS | REQ_F_ASYNC_DATA)
#define IO_TCTX_REFS_CACHE_NR (1U << 10)
@@ -412,11 +410,6 @@ struct io_ring_ctx {
struct list_head sqd_list;
unsigned long check_cq_overflow;
-#ifdef CONFIG_NET_RX_BUSY_POLL
- /* used to track busy poll napi_id */
- struct list_head napi_list;
- spinlock_t napi_lock; /* napi_list lock */
-#endif
struct {
unsigned cached_cq_tail;
@@ -500,7 +493,6 @@ struct io_uring_task {
const struct io_ring_ctx *last;
struct io_wq *io_wq;
struct percpu_counter inflight;
- atomic_t inflight_tracked;
atomic_t in_idle;
spinlock_t task_lock;
@@ -592,7 +584,8 @@ struct io_rw {
/* NOTE: kiocb has the file as the first member, so don't do it here */
struct kiocb kiocb;
u64 addr;
- u64 len;
+ u32 len;
+ u32 flags;
};
struct io_connect {
@@ -654,10 +647,10 @@ struct io_epoll {
struct io_splice {
struct file *file_out;
- struct file *file_in;
loff_t off_out;
loff_t off_in;
u64 len;
+ int splice_fd_in;
unsigned int flags;
};
@@ -914,7 +907,11 @@ struct io_kiocb {
u64 user_data;
u32 result;
- u32 cflags;
+ /* fd initially, then cflags for completion */
+ union {
+ u32 cflags;
+ int fd;
+ };
struct io_ring_ctx *ctx;
struct task_struct *task;
@@ -923,8 +920,12 @@ struct io_kiocb {
/* store used ubuf, so we can prevent reloading */
struct io_mapped_ubuf *imu;
- /* used by request caches, completion batching and iopoll */
- struct io_wq_work_node comp_list;
+ union {
+ /* used by request caches, completion batching and iopoll */
+ struct io_wq_work_node comp_list;
+ /* cache ->apoll->events */
+ int apoll_events;
+ };
atomic_t refs;
atomic_t poll_refs;
struct io_task_work io_task_work;
@@ -1182,8 +1183,11 @@ static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
struct io_uring_rsrc_update2 *up,
unsigned nr_args);
static void io_clean_op(struct io_kiocb *req);
-static struct file *io_file_get(struct io_ring_ctx *ctx,
- struct io_kiocb *req, int fd, bool fixed);
+static inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
+ unsigned issue_flags);
+static inline struct file *io_file_get_normal(struct io_kiocb *req, int fd);
+static void io_drop_inflight_file(struct io_kiocb *req);
+static bool io_assign_file(struct io_kiocb *req, unsigned int issue_flags);
static void __io_queue_sqe(struct io_kiocb *req);
static void io_rsrc_put_work(struct work_struct *work);
@@ -1313,13 +1317,20 @@ static void io_rsrc_refs_refill(struct io_ring_ctx *ctx)
}
static inline void io_req_set_rsrc_node(struct io_kiocb *req,
- struct io_ring_ctx *ctx)
+ struct io_ring_ctx *ctx,
+ unsigned int issue_flags)
{
if (!req->fixed_rsrc_refs) {
req->fixed_rsrc_refs = &ctx->rsrc_node->refs;
- ctx->rsrc_cached_refs--;
- if (unlikely(ctx->rsrc_cached_refs < 0))
- io_rsrc_refs_refill(ctx);
+
+ if (!(issue_flags & IO_URING_F_UNLOCKED)) {
+ lockdep_assert_held(&ctx->uring_lock);
+ ctx->rsrc_cached_refs--;
+ if (unlikely(ctx->rsrc_cached_refs < 0))
+ io_rsrc_refs_refill(ctx);
+ } else {
+ percpu_ref_get(req->fixed_rsrc_refs);
+ }
}
}
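
io_req_set_rsrc_node() above now distinguishes the locked submission path, which can consume one of the references batched in ctx->rsrc_cached_refs, from the unlocked path, which must take an individual reference. A simplified userspace sketch of that two-tier refcounting, using an atomic counter as a stand-in for the kernel's percpu_ref:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define REF_BATCH 8

struct rsrc_node_stub {
    atomic_int refs;            /* stands in for the percpu_ref */
};

struct ring_stub {
    int cached_refs;            /* only touched with the ring lock held */
    struct rsrc_node_stub node;
};

static void refill_cached_refs(struct ring_stub *r)
{
    /* take a whole batch of real references at once, then hand them out
     * one by one from the cheap cached counter */
    atomic_fetch_add(&r->node.refs, REF_BATCH);
    r->cached_refs += REF_BATCH;
}

static void get_rsrc_ref(struct ring_stub *r, bool ring_locked)
{
    if (ring_locked) {
        /* lock held by the caller: plain arithmetic, no atomics */
        if (--r->cached_refs < 0)
            refill_cached_refs(r);
    } else {
        /* unlocked (e.g. io-wq) path: pay for an individual reference */
        atomic_fetch_add(&r->node.refs, 1);
    }
}

int main(void)
{
    struct ring_stub r = { .cached_refs = 0 };

    get_rsrc_ref(&r, true);     /* locked: refills the cache in bulk */
    get_rsrc_ref(&r, false);    /* unlocked: single atomic increment */
    printf("node refs = %d, cached = %d\n",
           atomic_load(&r.node.refs), r.cached_refs);
    return 0;
}
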
@@ -1424,29 +1435,9 @@ static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
bool cancel_all)
__must_hold(&req->ctx->timeout_lock)
{
- struct io_kiocb *req;
-
if (task && head->task != task)
return false;
- if (cancel_all)
- return true;
-
- io_for_each_link(req, head) {
- if (req->flags & REQ_F_INFLIGHT)
- return true;
- }
- return false;
-}
-
-static bool io_match_linked(struct io_kiocb *head)
-{
- struct io_kiocb *req;
-
- io_for_each_link(req, head) {
- if (req->flags & REQ_F_INFLIGHT)
- return true;
- }
- return false;
+ return cancel_all;
}
/*
@@ -1456,24 +1447,9 @@ static bool io_match_linked(struct io_kiocb *head)
static bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
bool cancel_all)
{
- bool matched;
-
if (task && head->task != task)
return false;
- if (cancel_all)
- return true;
-
- if (head->flags & REQ_F_LINK_TIMEOUT) {
- struct io_ring_ctx *ctx = head->ctx;
-
- /* protect against races with linked timeouts */
- spin_lock_irq(&ctx->timeout_lock);
- matched = io_match_linked(head);
- spin_unlock_irq(&ctx->timeout_lock);
- } else {
- matched = io_match_linked(head);
- }
- return matched;
+ return cancel_all;
}
static inline bool req_has_async_data(struct io_kiocb *req)
@@ -1595,10 +1571,6 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
INIT_WQ_LIST(&ctx->locked_free_list);
INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func);
INIT_WQ_LIST(&ctx->submit_state.compl_reqs);
-#ifdef CONFIG_NET_RX_BUSY_POLL
- INIT_LIST_HEAD(&ctx->napi_list);
- spin_lock_init(&ctx->napi_lock);
-#endif
return ctx;
err:
kfree(ctx->dummy_ubuf);
@@ -1636,14 +1608,6 @@ static inline bool io_req_ffs_set(struct io_kiocb *req)
return req->flags & REQ_F_FIXED_FILE;
}
-static inline void io_req_track_inflight(struct io_kiocb *req)
-{
- if (!(req->flags & REQ_F_INFLIGHT)) {
- req->flags |= REQ_F_INFLIGHT;
- atomic_inc(&current->io_uring->inflight_tracked);
- }
-}
-
static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
{
if (WARN_ON_ONCE(!req->link))
@@ -1687,14 +1651,6 @@ static void io_prep_async_work(struct io_kiocb *req)
if (def->unbound_nonreg_file)
req->work.flags |= IO_WQ_WORK_UNBOUND;
}
-
- switch (req->opcode) {
- case IORING_OP_SPLICE:
- case IORING_OP_TEE:
- if (!S_ISREG(file_inode(req->splice.file_in)->i_mode))
- req->work.flags |= IO_WQ_WORK_UNBOUND;
- break;
- }
}
static void io_prep_async_link(struct io_kiocb *req)
@@ -1788,12 +1744,11 @@ static __cold void io_flush_timeouts(struct io_ring_ctx *ctx)
__must_hold(&ctx->completion_lock)
{
u32 seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
+ struct io_kiocb *req, *tmp;
spin_lock_irq(&ctx->timeout_lock);
- while (!list_empty(&ctx->timeout_list)) {
+ list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
u32 events_needed, events_got;
- struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
- struct io_kiocb, timeout.list);
if (io_is_timeout_noseq(req))
break;
@@ -1810,7 +1765,6 @@ static __cold void io_flush_timeouts(struct io_ring_ctx *ctx)
if (events_got < events_needed)
break;
- list_del_init(&req->timeout.list);
io_kill_timeout(req, 0);
}
ctx->cq_last_tm_flush = seq;
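
The timeout-flush hunk above switches from repeatedly popping the head of ctx->timeout_list to list_for_each_entry_safe(), which caches the next entry so the current one can be unlinked and freed mid-walk. A self-contained userspace sketch of the same iterate-while-removing idea on a plain singly linked list; the names and the sorted-by-sequence assumption are illustrative only:

#include <stdio.h>
#include <stdlib.h>

struct timeout_stub {
    unsigned seq;                /* target sequence; list kept sorted */
    struct timeout_stub *next;
};

static void flush_timeouts(struct timeout_stub **head, unsigned seq_now)
{
    struct timeout_stub *cur = *head, *next;

    while (cur) {
        next = cur->next;        /* remember the successor first ... */
        if (cur->seq > seq_now)  /* sorted list: nothing further has fired */
            break;
        *head = next;            /* ... so unlinking/freeing cur is safe */
        free(cur);
        cur = next;
    }
}

int main(void)
{
    struct timeout_stub *head = NULL, **tail = &head;

    for (unsigned i = 1; i <= 5; i++) {
        struct timeout_stub *t = calloc(1, sizeof(*t));
        t->seq = i * 10;
        *tail = t;
        tail = &t->next;
    }
    flush_timeouts(&head, 30);   /* expires seq 10, 20, 30 */
    for (struct timeout_stub *t = head; t; t = t->next)
        printf("still pending: %u\n", t->seq);
    flush_timeouts(&head, ~0u);  /* free the rest */
    return 0;
}
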
@@ -2562,6 +2516,8 @@ static void io_req_task_work_add(struct io_kiocb *req, bool priority)
WARN_ON_ONCE(!tctx);
+ io_drop_inflight_file(req);
+
spin_lock_irqsave(&tctx->task_lock, flags);
if (priority)
wq_list_add_tail(&req->io_task_work.node, &tctx->prior_task_list);
@@ -2841,11 +2797,10 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
/* order with io_complete_rw_iopoll(), e.g. ->result updates */
if (!smp_load_acquire(&req->iopoll_completed))
break;
+ nr_events++;
if (unlikely(req->flags & REQ_F_CQE_SKIP))
continue;
-
__io_fill_cqe_req(req, req->result, io_put_kbuf(req, 0));
- nr_events++;
}
if (unlikely(!nr_events))
@@ -3186,42 +3141,11 @@ static inline bool io_file_supports_nowait(struct io_kiocb *req)
static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- struct io_ring_ctx *ctx = req->ctx;
struct kiocb *kiocb = &req->rw.kiocb;
- struct file *file = req->file;
unsigned ioprio;
int ret;
- if (!io_req_ffs_set(req))
- req->flags |= io_file_get_flags(file) << REQ_F_SUPPORT_NOWAIT_BIT;
-
kiocb->ki_pos = READ_ONCE(sqe->off);
- kiocb->ki_flags = iocb_flags(file);
- ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
- if (unlikely(ret))
- return ret;
-
- /*
- * If the file is marked O_NONBLOCK, still allow retry for it if it
- * supports async. Otherwise it's impossible to use O_NONBLOCK files
- * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
- */
- if ((kiocb->ki_flags & IOCB_NOWAIT) ||
- ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req)))
- req->flags |= REQ_F_NOWAIT;
-
- if (ctx->flags & IORING_SETUP_IOPOLL) {
- if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
- return -EOPNOTSUPP;
-
- kiocb->ki_flags |= IOCB_HIPRI | IOCB_ALLOC_CACHE;
- kiocb->ki_complete = io_complete_rw_iopoll;
- req->iopoll_completed = 0;
- } else {
- if (kiocb->ki_flags & IOCB_HIPRI)
- return -EINVAL;
- kiocb->ki_complete = io_complete_rw;
- }
ioprio = READ_ONCE(sqe->ioprio);
if (ioprio) {
@@ -3237,6 +3161,7 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
req->imu = NULL;
req->rw.addr = READ_ONCE(sqe->addr);
req->rw.len = READ_ONCE(sqe->len);
+ req->rw.flags = READ_ONCE(sqe->rw_flags);
req->buf_index = READ_ONCE(sqe->buf_index);
return 0;
}
@@ -3265,19 +3190,18 @@ static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
{
struct kiocb *kiocb = &req->rw.kiocb;
- bool is_stream = req->file->f_mode & FMODE_STREAM;
- if (kiocb->ki_pos == -1) {
- if (!is_stream) {
- req->flags |= REQ_F_CUR_POS;
- kiocb->ki_pos = req->file->f_pos;
- return &kiocb->ki_pos;
- } else {
- kiocb->ki_pos = 0;
- return NULL;
- }
+ if (kiocb->ki_pos != -1)
+ return &kiocb->ki_pos;
+
+ if (!(req->file->f_mode & FMODE_STREAM)) {
+ req->flags |= REQ_F_CUR_POS;
+ kiocb->ki_pos = req->file->f_pos;
+ return &kiocb->ki_pos;
}
- return is_stream ? NULL : &kiocb->ki_pos;
+
+ kiocb->ki_pos = 0;
+ return NULL;
}
static void kiocb_done(struct io_kiocb *req, ssize_t ret,
@@ -3367,7 +3291,8 @@ static int __io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter
return 0;
}
-static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)
+static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter,
+ unsigned int issue_flags)
{
struct io_mapped_ubuf *imu = req->imu;
u16 index, buf_index = req->buf_index;
@@ -3377,7 +3302,7 @@ static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)
if (unlikely(buf_index >= ctx->nr_user_bufs))
return -EFAULT;
- io_req_set_rsrc_node(req, ctx);
+ io_req_set_rsrc_node(req, ctx, issue_flags);
index = array_index_nospec(buf_index, ctx->nr_user_bufs);
imu = READ_ONCE(ctx->user_bufs[index]);
req->imu = imu;
@@ -3539,7 +3464,7 @@ static struct iovec *__io_import_iovec(int rw, struct io_kiocb *req,
ssize_t ret;
if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
- ret = io_import_fixed(req, rw, iter);
+ ret = io_import_fixed(req, rw, iter, issue_flags);
if (ret)
return ERR_PTR(ret);
return NULL;
@@ -3740,13 +3665,6 @@ static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
return 0;
}
-static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
-{
- if (unlikely(!(req->file->f_mode & FMODE_READ)))
- return -EBADF;
- return io_prep_rw(req, sqe);
-}
-
/*
* This is our waitqueue callback handler, registered through __folio_lock_async()
* when we initially tried to do the IO with the iocb armed our waitqueue.
@@ -3834,6 +3752,50 @@ static bool need_read_all(struct io_kiocb *req)
S_ISBLK(file_inode(req->file)->i_mode);
}
+static int io_rw_init_file(struct io_kiocb *req, fmode_t mode)
+{
+ struct kiocb *kiocb = &req->rw.kiocb;
+ struct io_ring_ctx *ctx = req->ctx;
+ struct file *file = req->file;
+ int ret;
+
+ if (unlikely(!file || !(file->f_mode & mode)))
+ return -EBADF;
+
+ if (!io_req_ffs_set(req))
+ req->flags |= io_file_get_flags(file) << REQ_F_SUPPORT_NOWAIT_BIT;
+
+ kiocb->ki_flags = iocb_flags(file);
+ ret = kiocb_set_rw_flags(kiocb, req->rw.flags);
+ if (unlikely(ret))
+ return ret;
+
+ /*
+ * If the file is marked O_NONBLOCK, still allow retry for it if it
+ * supports async. Otherwise it's impossible to use O_NONBLOCK files
+ * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
+ */
+ if ((kiocb->ki_flags & IOCB_NOWAIT) ||
+ ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req)))
+ req->flags |= REQ_F_NOWAIT;
+
+ if (ctx->flags & IORING_SETUP_IOPOLL) {
+ if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
+ return -EOPNOTSUPP;
+
+ kiocb->private = NULL;
+ kiocb->ki_flags |= IOCB_HIPRI | IOCB_ALLOC_CACHE;
+ kiocb->ki_complete = io_complete_rw_iopoll;
+ req->iopoll_completed = 0;
+ } else {
+ if (kiocb->ki_flags & IOCB_HIPRI)
+ return -EINVAL;
+ kiocb->ki_complete = io_complete_rw;
+ }
+
+ return 0;
+}
+
static int io_read(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_rw_state __s, *s = &__s;
@@ -3869,6 +3831,11 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
iov_iter_restore(&s->iter, &s->iter_state);
iovec = NULL;
}
+ ret = io_rw_init_file(req, FMODE_READ);
+ if (unlikely(ret)) {
+ kfree(iovec);
+ return ret;
+ }
req->result = iov_iter_count(&s->iter);
if (force_nonblock) {
@@ -3972,13 +3939,6 @@ out_free:
return 0;
}
-static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
-{
- if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
- return -EBADF;
- return io_prep_rw(req, sqe);
-}
-
static int io_write(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_rw_state __s, *s = &__s;
@@ -3999,6 +3959,11 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
iov_iter_restore(&s->iter, &s->iter_state);
iovec = NULL;
}
+ ret = io_rw_init_file(req, FMODE_WRITE);
+ if (unlikely(ret)) {
+ kfree(iovec);
+ return ret;
+ }
req->result = iov_iter_count(&s->iter);
if (force_nonblock) {
@@ -4369,18 +4334,11 @@ static int __io_splice_prep(struct io_kiocb *req,
if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
- sp->file_in = NULL;
sp->len = READ_ONCE(sqe->len);
sp->flags = READ_ONCE(sqe->splice_flags);
-
if (unlikely(sp->flags & ~valid_flags))
return -EINVAL;
-
- sp->file_in = io_file_get(req->ctx, req, READ_ONCE(sqe->splice_fd_in),
- (sp->flags & SPLICE_F_FD_IN_FIXED));
- if (!sp->file_in)
- return -EBADF;
- req->flags |= REQ_F_NEED_CLEANUP;
+ sp->splice_fd_in = READ_ONCE(sqe->splice_fd_in);
return 0;
}
@@ -4395,20 +4353,29 @@ static int io_tee_prep(struct io_kiocb *req,
static int io_tee(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_splice *sp = &req->splice;
- struct file *in = sp->file_in;
struct file *out = sp->file_out;
unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
+ struct file *in;
long ret = 0;
if (issue_flags & IO_URING_F_NONBLOCK)
return -EAGAIN;
+
+ if (sp->flags & SPLICE_F_FD_IN_FIXED)
+ in = io_file_get_fixed(req, sp->splice_fd_in, issue_flags);
+ else
+ in = io_file_get_normal(req, sp->splice_fd_in);
+ if (!in) {
+ ret = -EBADF;
+ goto done;
+ }
+
if (sp->len)
ret = do_tee(in, out, sp->len, flags);
if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
io_put_file(in);
- req->flags &= ~REQ_F_NEED_CLEANUP;
-
+done:
if (ret != sp->len)
req_set_fail(req);
io_req_complete(req, ret);
@@ -4427,15 +4394,24 @@ static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
static int io_splice(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_splice *sp = &req->splice;
- struct file *in = sp->file_in;
struct file *out = sp->file_out;
unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
loff_t *poff_in, *poff_out;
+ struct file *in;
long ret = 0;
if (issue_flags & IO_URING_F_NONBLOCK)
return -EAGAIN;
+ if (sp->flags & SPLICE_F_FD_IN_FIXED)
+ in = io_file_get_fixed(req, sp->splice_fd_in, issue_flags);
+ else
+ in = io_file_get_normal(req, sp->splice_fd_in);
+ if (!in) {
+ ret = -EBADF;
+ goto done;
+ }
+
poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;
@@ -4444,8 +4420,7 @@ static int io_splice(struct io_kiocb *req, unsigned int issue_flags)
if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
io_put_file(in);
- req->flags &= ~REQ_F_NEED_CLEANUP;
-
+done:
if (ret != sp->len)
req_set_fail(req);
io_req_complete(req, ret);
@@ -4506,6 +4481,9 @@ done:
if (ret < 0)
req_set_fail(req);
__io_req_complete(req, issue_flags, ret, 0);
+ /* put file to avoid an attempt to IOPOLL the req */
+ io_put_file(req->file);
+ req->file = NULL;
return 0;
}
@@ -4513,9 +4491,6 @@ static int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_ring_ctx *ctx = req->ctx;
- if (!req->file)
- return -EBADF;
-
if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index ||
@@ -5236,6 +5211,8 @@ static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
+ if (unlikely(sqe->addr2 || sqe->file_index))
+ return -EINVAL;
sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
sr->len = READ_ONCE(sqe->len);
@@ -5447,6 +5424,8 @@ static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
+ if (unlikely(sqe->addr2 || sqe->file_index))
+ return -EINVAL;
sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
sr->len = READ_ONCE(sqe->len);
@@ -5757,108 +5736,6 @@ IO_NETOP_FN(send);
IO_NETOP_FN(recv);
#endif /* CONFIG_NET */
-#ifdef CONFIG_NET_RX_BUSY_POLL
-
-#define NAPI_TIMEOUT (60 * SEC_CONVERSION)
-
-struct napi_entry {
- struct list_head list;
- unsigned int napi_id;
- unsigned long timeout;
-};
-
-/*
- * Add busy poll NAPI ID from sk.
- */
-static void io_add_napi(struct file *file, struct io_ring_ctx *ctx)
-{
- unsigned int napi_id;
- struct socket *sock;
- struct sock *sk;
- struct napi_entry *ne;
-
- if (!net_busy_loop_on())
- return;
-
- sock = sock_from_file(file);
- if (!sock)
- return;
-
- sk = sock->sk;
- if (!sk)
- return;
-
- napi_id = READ_ONCE(sk->sk_napi_id);
-
- /* Non-NAPI IDs can be rejected */
- if (napi_id < MIN_NAPI_ID)
- return;
-
- spin_lock(&ctx->napi_lock);
- list_for_each_entry(ne, &ctx->napi_list, list) {
- if (ne->napi_id == napi_id) {
- ne->timeout = jiffies + NAPI_TIMEOUT;
- goto out;
- }
- }
-
- ne = kmalloc(sizeof(*ne), GFP_NOWAIT);
- if (!ne)
- goto out;
-
- ne->napi_id = napi_id;
- ne->timeout = jiffies + NAPI_TIMEOUT;
- list_add_tail(&ne->list, &ctx->napi_list);
-out:
- spin_unlock(&ctx->napi_lock);
-}
-
-static inline void io_check_napi_entry_timeout(struct napi_entry *ne)
-{
- if (time_after(jiffies, ne->timeout)) {
- list_del(&ne->list);
- kfree(ne);
- }
-}
-
-/*
- * Busy poll if globally on and supporting sockets found
- */
-static bool io_napi_busy_loop(struct list_head *napi_list)
-{
- struct napi_entry *ne, *n;
-
- list_for_each_entry_safe(ne, n, napi_list, list) {
- napi_busy_loop(ne->napi_id, NULL, NULL, true,
- BUSY_POLL_BUDGET);
- io_check_napi_entry_timeout(ne);
- }
- return !list_empty(napi_list);
-}
-
-static void io_free_napi_list(struct io_ring_ctx *ctx)
-{
- spin_lock(&ctx->napi_lock);
- while (!list_empty(&ctx->napi_list)) {
- struct napi_entry *ne =
- list_first_entry(&ctx->napi_list, struct napi_entry,
- list);
-
- list_del(&ne->list);
- kfree(ne);
- }
- spin_unlock(&ctx->napi_lock);
-}
-#else
-static inline void io_add_napi(struct file *file, struct io_ring_ctx *ctx)
-{
-}
-
-static inline void io_free_napi_list(struct io_ring_ctx *ctx)
-{
-}
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
struct io_poll_table {
struct poll_table_struct pt;
struct io_kiocb *req;
@@ -5972,10 +5849,9 @@ static void io_poll_remove_entries(struct io_kiocb *req)
* either spurious wakeup or multishot CQE is served. 0 when it's done with
* the request, then the mask is stored in req->result.
*/
-static int io_poll_check_events(struct io_kiocb *req)
+static int io_poll_check_events(struct io_kiocb *req, bool locked)
{
struct io_ring_ctx *ctx = req->ctx;
- struct io_poll_iocb *poll = io_poll_get_single(req);
int v;
/* req->task == current here, checking PF_EXITING is safe */
@@ -5992,14 +5868,17 @@ static int io_poll_check_events(struct io_kiocb *req)
return -ECANCELED;
if (!req->result) {
- struct poll_table_struct pt = { ._key = req->cflags };
+ struct poll_table_struct pt = { ._key = req->apoll_events };
+ unsigned flags = locked ? 0 : IO_URING_F_UNLOCKED;
- req->result = vfs_poll(req->file, &pt) & req->cflags;
+ if (unlikely(!io_assign_file(req, flags)))
+ return -EBADF;
+ req->result = vfs_poll(req->file, &pt) & req->apoll_events;
}
/* multishot, just fill an CQE and proceed */
- if (req->result && !(req->cflags & EPOLLONESHOT)) {
- __poll_t mask = mangle_poll(req->result & poll->events);
+ if (req->result && !(req->apoll_events & EPOLLONESHOT)) {
+ __poll_t mask = mangle_poll(req->result & req->apoll_events);
bool filled;
spin_lock(&ctx->completion_lock);
@@ -6010,7 +5889,6 @@ static int io_poll_check_events(struct io_kiocb *req)
if (unlikely(!filled))
return -ECANCELED;
io_cqring_ev_posted(ctx);
- io_add_napi(req->file, ctx);
} else if (req->result) {
return 0;
}
@@ -6029,7 +5907,7 @@ static void io_poll_task_func(struct io_kiocb *req, bool *locked)
struct io_ring_ctx *ctx = req->ctx;
int ret;
- ret = io_poll_check_events(req);
+ ret = io_poll_check_events(req, *locked);
if (ret > 0)
return;
@@ -6054,7 +5932,7 @@ static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
struct io_ring_ctx *ctx = req->ctx;
int ret;
- ret = io_poll_check_events(req);
+ ret = io_poll_check_events(req, *locked);
if (ret > 0)
return;
@@ -6078,7 +5956,7 @@ static void __io_poll_execute(struct io_kiocb *req, int mask, int events)
* CPU. We want to avoid pulling in req->apoll->events for that
* case.
*/
- req->cflags = events;
+ req->apoll_events = events;
if (req->opcode == IORING_OP_POLL_ADD)
req->io_task_work.func = io_poll_task_func;
else
@@ -6261,7 +6139,6 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
__io_poll_execute(req, mask, poll->events);
return 0;
}
- io_add_napi(req->file, req->ctx);
/*
* Release ownership. If someone tried to queue a tw while it was
@@ -6471,7 +6348,7 @@ static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
return -EINVAL;
io_req_set_refcount(req);
- req->cflags = poll->events = io_poll_parse_events(sqe, flags);
+ req->apoll_events = poll->events = io_poll_parse_events(sqe, flags);
return 0;
}
@@ -6766,6 +6643,7 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (data->ts.tv_sec < 0 || data->ts.tv_nsec < 0)
return -EINVAL;
+ INIT_LIST_HEAD(&req->timeout.list);
data->mode = io_translate_timeout_mode(flags);
hrtimer_init(&data->timer, io_timeout_get_clock(data), data->mode);
@@ -6972,6 +6850,7 @@ static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
up.nr = 0;
up.tags = 0;
up.resv = 0;
+ up.resv2 = 0;
io_ring_submit_lock(ctx, needs_lock);
ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
@@ -6992,11 +6871,10 @@ static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
case IORING_OP_READV:
case IORING_OP_READ_FIXED:
case IORING_OP_READ:
- return io_read_prep(req, sqe);
case IORING_OP_WRITEV:
case IORING_OP_WRITE_FIXED:
case IORING_OP_WRITE:
- return io_write_prep(req, sqe);
+ return io_prep_rw(req, sqe);
case IORING_OP_POLL_ADD:
return io_poll_add_prep(req, sqe);
case IORING_OP_POLL_REMOVE:
@@ -7072,7 +6950,12 @@ static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
static int io_req_prep_async(struct io_kiocb *req)
{
- if (!io_op_defs[req->opcode].needs_async_setup)
+ const struct io_op_def *def = &io_op_defs[req->opcode];
+
+ /* assign early for deferred execution for non-fixed file */
+ if (def->needs_file && !(req->flags & REQ_F_FIXED_FILE))
+ req->file = io_file_get_normal(req, req->fd);
+ if (!def->needs_async_setup)
return 0;
if (WARN_ON_ONCE(req_has_async_data(req)))
return -EFAULT;
@@ -7179,11 +7062,6 @@ static void io_clean_op(struct io_kiocb *req)
kfree(io->free_iov);
break;
}
- case IORING_OP_SPLICE:
- case IORING_OP_TEE:
- if (!(req->splice.flags & SPLICE_F_FD_IN_FIXED))
- io_put_file(req->splice.file_in);
- break;
case IORING_OP_OPENAT:
case IORING_OP_OPENAT2:
if (req->open.filename)
@@ -7218,11 +7096,6 @@ static void io_clean_op(struct io_kiocb *req)
kfree(req->apoll);
req->apoll = NULL;
}
- if (req->flags & REQ_F_INFLIGHT) {
- struct io_uring_task *tctx = req->task->io_uring;
-
- atomic_dec(&tctx->inflight_tracked);
- }
if (req->flags & REQ_F_CREDS)
put_cred(req->creds);
if (req->flags & REQ_F_ASYNC_DATA) {
@@ -7232,11 +7105,31 @@ static void io_clean_op(struct io_kiocb *req)
req->flags &= ~IO_REQ_CLEAN_FLAGS;
}
+static bool io_assign_file(struct io_kiocb *req, unsigned int issue_flags)
+{
+ if (req->file || !io_op_defs[req->opcode].needs_file)
+ return true;
+
+ if (req->flags & REQ_F_FIXED_FILE)
+ req->file = io_file_get_fixed(req, req->fd, issue_flags);
+ else
+ req->file = io_file_get_normal(req, req->fd);
+ if (req->file)
+ return true;
+
+ req_set_fail(req);
+ req->result = -EBADF;
+ return false;
+}
+
static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
{
const struct cred *creds = NULL;
int ret;
+ if (unlikely(!io_assign_file(req, issue_flags)))
+ return -EBADF;
+
if (unlikely((req->flags & REQ_F_CREDS) && req->creds != current_cred()))
creds = override_creds(req->creds);
@@ -7386,10 +7279,11 @@ static struct io_wq_work *io_wq_free_work(struct io_wq_work *work)
static void io_wq_submit_work(struct io_wq_work *work)
{
struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+ const struct io_op_def *def = &io_op_defs[req->opcode];
unsigned int issue_flags = IO_URING_F_UNLOCKED;
bool needs_poll = false;
struct io_kiocb *timeout;
- int ret = 0;
+ int ret = 0, err = -ECANCELED;
/* one will be dropped by ->io_free_work() after returning to io-wq */
if (!(req->flags & REQ_F_REFCOUNT))
@@ -7401,14 +7295,20 @@ static void io_wq_submit_work(struct io_wq_work *work)
if (timeout)
io_queue_linked_timeout(timeout);
+
/* either cancelled or io-wq is dying, so don't touch tctx->iowq */
if (work->flags & IO_WQ_WORK_CANCEL) {
- io_req_task_queue_fail(req, -ECANCELED);
+fail:
+ io_req_task_queue_fail(req, err);
return;
}
+ if (!io_assign_file(req, issue_flags)) {
+ err = -EBADF;
+ work->flags |= IO_WQ_WORK_CANCEL;
+ goto fail;
+ }
if (req->flags & REQ_F_FORCE_ASYNC) {
- const struct io_op_def *def = &io_op_defs[req->opcode];
bool opcode_poll = def->pollin || def->pollout;
if (opcode_poll && file_can_poll(req->file)) {
@@ -7465,46 +7365,56 @@ static void io_fixed_file_set(struct io_fixed_file *file_slot, struct file *file
file_slot->file_ptr = file_ptr;
}
-static inline struct file *io_file_get_fixed(struct io_ring_ctx *ctx,
- struct io_kiocb *req, int fd)
+static inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
+ unsigned int issue_flags)
{
- struct file *file;
+ struct io_ring_ctx *ctx = req->ctx;
+ struct file *file = NULL;
unsigned long file_ptr;
+ if (issue_flags & IO_URING_F_UNLOCKED)
+ mutex_lock(&ctx->uring_lock);
+
if (unlikely((unsigned int)fd >= ctx->nr_user_files))
- return NULL;
+ goto out;
fd = array_index_nospec(fd, ctx->nr_user_files);
file_ptr = io_fixed_file_slot(&ctx->file_table, fd)->file_ptr;
file = (struct file *) (file_ptr & FFS_MASK);
file_ptr &= ~FFS_MASK;
/* mask in overlapping REQ_F and FFS bits */
req->flags |= (file_ptr << REQ_F_SUPPORT_NOWAIT_BIT);
- io_req_set_rsrc_node(req, ctx);
+ io_req_set_rsrc_node(req, ctx, 0);
+out:
+ if (issue_flags & IO_URING_F_UNLOCKED)
+ mutex_unlock(&ctx->uring_lock);
return file;
}
-static struct file *io_file_get_normal(struct io_ring_ctx *ctx,
- struct io_kiocb *req, int fd)
+/*
+ * Drop the file for requeue operations. Only used if req->file is the
+ * io_uring descriptor itself.
+ */
+static void io_drop_inflight_file(struct io_kiocb *req)
+{
+ if (unlikely(req->flags & REQ_F_INFLIGHT)) {
+ fput(req->file);
+ req->file = NULL;
+ req->flags &= ~REQ_F_INFLIGHT;
+ }
+}
+
+static struct file *io_file_get_normal(struct io_kiocb *req, int fd)
{
struct file *file = fget(fd);
- trace_io_uring_file_get(ctx, req, req->user_data, fd);
+ trace_io_uring_file_get(req->ctx, req, req->user_data, fd);
/* we don't allow fixed io_uring files */
- if (file && unlikely(file->f_op == &io_uring_fops))
- io_req_track_inflight(req);
+ if (file && file->f_op == &io_uring_fops)
+ req->flags |= REQ_F_INFLIGHT;
return file;
}
-static inline struct file *io_file_get(struct io_ring_ctx *ctx,
- struct io_kiocb *req, int fd, bool fixed)
-{
- if (fixed)
- return io_file_get_fixed(ctx, req, fd);
- else
- return io_file_get_normal(ctx, req, fd);
-}
-
static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
{
struct io_kiocb *prev = req->timeout.prev;
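
io_file_get_fixed() above also shows the fixed-file table's tagged-pointer convention: per-file flag bits ride in the low bits of the stored pointer, file_ptr & FFS_MASK recovers the struct file, and the leftover bits are ORed into req->flags. A small userspace sketch of that trick; the flag names and values are invented for the example:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* invented flag bits for the example; the pointer bits live in FFS_MASK */
#define FFS_NOWAIT   0x1UL
#define FFS_ISREG    0x2UL
#define FFS_FLAGBITS (FFS_NOWAIT | FFS_ISREG)
#define FFS_MASK     (~FFS_FLAGBITS)

struct file_stub { char name[32]; };

static uintptr_t pack_slot(struct file_stub *f, unsigned long flags)
{
    uintptr_t p = (uintptr_t)f;

    /* the allocation is aligned, so the low bits are known to be zero
     * and can carry per-file flags alongside the pointer */
    assert((p & FFS_FLAGBITS) == 0);
    return p | flags;
}

int main(void)
{
    struct file_stub *f = aligned_alloc(8, sizeof(*f));
    uintptr_t slot = pack_slot(f, FFS_NOWAIT | FFS_ISREG);

    struct file_stub *file = (struct file_stub *)(slot & FFS_MASK);
    unsigned long flags = slot & ~FFS_MASK;

    printf("file=%p flags=%#lx\n", (void *)file, flags);
    free(f);
    return 0;
}
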
@@ -7744,6 +7654,8 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
if (io_op_defs[opcode].needs_file) {
struct io_submit_state *state = &ctx->submit_state;
+ req->fd = READ_ONCE(sqe->fd);
+
/*
* Plug now if we have more than 2 IO left after this, and the
* target is potentially a read/write to block based storage.
@@ -7753,11 +7665,6 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
state->need_plug = false;
blk_start_plug_nr_ios(&state->plug, state->submit_nr);
}
-
- req->file = io_file_get(ctx, req, READ_ONCE(sqe->fd),
- (sqe_flags & IOSQE_FIXED_FILE));
- if (unlikely(!req->file))
- return -EBADF;
}
personality = READ_ONCE(sqe->personality);
@@ -8032,13 +7939,7 @@ static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
!(ctx->flags & IORING_SETUP_R_DISABLED))
ret = io_submit_sqes(ctx, to_submit);
mutex_unlock(&ctx->uring_lock);
-#ifdef CONFIG_NET_RX_BUSY_POLL
- spin_lock(&ctx->napi_lock);
- if (!list_empty(&ctx->napi_list) &&
- io_napi_busy_loop(&ctx->napi_list))
- ++ret;
- spin_unlock(&ctx->napi_lock);
-#endif
+
if (to_submit && wq_has_sleeper(&ctx->sqo_sq_wait))
wake_up(&ctx->sqo_sq_wait);
if (creds)
@@ -8176,9 +8077,6 @@ struct io_wait_queue {
struct io_ring_ctx *ctx;
unsigned cq_tail;
unsigned nr_timeouts;
-#ifdef CONFIG_NET_RX_BUSY_POLL
- unsigned busy_poll_to;
-#endif
};
static inline bool io_should_wake(struct io_wait_queue *iowq)
@@ -8240,87 +8138,6 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
return 1;
}
-#ifdef CONFIG_NET_RX_BUSY_POLL
-static void io_adjust_busy_loop_timeout(struct timespec64 *ts,
- struct io_wait_queue *iowq)
-{
- unsigned busy_poll_to = READ_ONCE(sysctl_net_busy_poll);
- struct timespec64 pollto = ns_to_timespec64(1000 * (s64)busy_poll_to);
-
- if (timespec64_compare(ts, &pollto) > 0) {
- *ts = timespec64_sub(*ts, pollto);
- iowq->busy_poll_to = busy_poll_to;
- } else {
- u64 to = timespec64_to_ns(ts);
-
- do_div(to, 1000);
- iowq->busy_poll_to = to;
- ts->tv_sec = 0;
- ts->tv_nsec = 0;
- }
-}
-
-static inline bool io_busy_loop_timeout(unsigned long start_time,
- unsigned long bp_usec)
-{
- if (bp_usec) {
- unsigned long end_time = start_time + bp_usec;
- unsigned long now = busy_loop_current_time();
-
- return time_after(now, end_time);
- }
- return true;
-}
-
-static bool io_busy_loop_end(void *p, unsigned long start_time)
-{
- struct io_wait_queue *iowq = p;
-
- return signal_pending(current) ||
- io_should_wake(iowq) ||
- io_busy_loop_timeout(start_time, iowq->busy_poll_to);
-}
-
-static void io_blocking_napi_busy_loop(struct list_head *napi_list,
- struct io_wait_queue *iowq)
-{
- unsigned long start_time =
- list_is_singular(napi_list) ? 0 :
- busy_loop_current_time();
-
- do {
- if (list_is_singular(napi_list)) {
- struct napi_entry *ne =
- list_first_entry(napi_list,
- struct napi_entry, list);
-
- napi_busy_loop(ne->napi_id, io_busy_loop_end, iowq,
- true, BUSY_POLL_BUDGET);
- io_check_napi_entry_timeout(ne);
- break;
- }
- } while (io_napi_busy_loop(napi_list) &&
- !io_busy_loop_end(iowq, start_time));
-}
-
-static void io_putback_napi_list(struct io_ring_ctx *ctx,
- struct list_head *napi_list)
-{
- struct napi_entry *cne, *lne;
-
- spin_lock(&ctx->napi_lock);
- list_for_each_entry(cne, &ctx->napi_list, list)
- list_for_each_entry(lne, napi_list, list)
- if (cne->napi_id == lne->napi_id) {
- list_del(&lne->list);
- kfree(lne);
- break;
- }
- list_splice(napi_list, &ctx->napi_list);
- spin_unlock(&ctx->napi_lock);
-}
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
/*
* Wait until events become available, if we don't already have some. The
* application must reap them itself, as they reside on the shared cq ring.
@@ -8333,9 +8150,6 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
struct io_rings *rings = ctx->rings;
ktime_t timeout = KTIME_MAX;
int ret;
-#ifdef CONFIG_NET_RX_BUSY_POLL
- LIST_HEAD(local_napi_list);
-#endif
do {
io_cqring_overflow_flush(ctx);
@@ -8358,29 +8172,13 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
return ret;
}
-#ifdef CONFIG_NET_RX_BUSY_POLL
- iowq.busy_poll_to = 0;
- if (!(ctx->flags & IORING_SETUP_SQPOLL)) {
- spin_lock(&ctx->napi_lock);
- list_splice_init(&ctx->napi_list, &local_napi_list);
- spin_unlock(&ctx->napi_lock);
- }
-#endif
if (uts) {
struct timespec64 ts;
if (get_timespec64(&ts, uts))
return -EFAULT;
-#ifdef CONFIG_NET_RX_BUSY_POLL
- if (!list_empty(&local_napi_list))
- io_adjust_busy_loop_timeout(&ts, &iowq);
-#endif
timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
}
-#ifdef CONFIG_NET_RX_BUSY_POLL
- else if (!list_empty(&local_napi_list))
- iowq.busy_poll_to = READ_ONCE(sysctl_net_busy_poll);
-#endif
init_waitqueue_func_entry(&iowq.wq, io_wake_function);
iowq.wq.private = current;
@@ -8390,12 +8188,6 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;
trace_io_uring_cqring_wait(ctx, min_events);
-#ifdef CONFIG_NET_RX_BUSY_POLL
- if (iowq.busy_poll_to)
- io_blocking_napi_busy_loop(&local_napi_list, &iowq);
- if (!list_empty(&local_napi_list))
- io_putback_napi_list(ctx, &local_napi_list);
-#endif
do {
/* if we can't even flush overflow, don't wait for more */
if (!io_cqring_overflow_flush(ctx)) {
@@ -8864,8 +8656,12 @@ static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
refcount_add(skb->truesize, &sk->sk_wmem_alloc);
skb_queue_head(&sk->sk_receive_queue, skb);
- for (i = 0; i < nr_files; i++)
- fput(fpl->fp[i]);
+ for (i = 0; i < nr; i++) {
+ struct file *file = io_file_from_index(ctx, i + offset);
+
+ if (file)
+ fput(file);
+ }
} else {
kfree_skb(skb);
free_uid(fpl->user);
@@ -9156,13 +8952,15 @@ static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
static int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
struct io_rsrc_node *node, void *rsrc)
{
+ u64 *tag_slot = io_get_tag_slot(data, idx);
struct io_rsrc_put *prsrc;
prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
if (!prsrc)
return -ENOMEM;
- prsrc->tag = *io_get_tag_slot(data, idx);
+ prsrc->tag = *tag_slot;
+ *tag_slot = 0;
prsrc->rsrc = rsrc;
list_add(&prsrc->list, &node->rsrc_list);
return 0;
@@ -9231,7 +9029,7 @@ static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags)
bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;
struct io_fixed_file *file_slot;
struct file *file;
- int ret, i;
+ int ret;
io_ring_submit_lock(ctx, needs_lock);
ret = -ENXIO;
@@ -9244,8 +9042,8 @@ static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags)
if (ret)
goto out;
- i = array_index_nospec(offset, ctx->nr_user_files);
- file_slot = io_fixed_file_slot(&ctx->file_table, i);
+ offset = array_index_nospec(offset, ctx->nr_user_files);
+ file_slot = io_fixed_file_slot(&ctx->file_table, offset);
ret = -EBADF;
if (!file_slot->file_ptr)
goto out;
@@ -9301,8 +9099,7 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
if (file_slot->file_ptr) {
file = (struct file *)(file_slot->file_ptr & FFS_MASK);
- err = io_queue_rsrc_removal(data, up->offset + done,
- ctx->rsrc_node, file);
+ err = io_queue_rsrc_removal(data, i, ctx->rsrc_node, file);
if (err)
break;
file_slot->file_ptr = 0;
@@ -9327,7 +9124,7 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
err = -EBADF;
break;
}
- *io_get_tag_slot(data, up->offset + done) = tag;
+ *io_get_tag_slot(data, i) = tag;
io_fixed_file_set(file_slot, file);
err = io_sqe_file_register(ctx, file, i);
if (err) {
@@ -9411,7 +9208,6 @@ static __cold int io_uring_alloc_task_context(struct task_struct *task,
xa_init(&tctx->xa);
init_waitqueue_head(&tctx->wait);
atomic_set(&tctx->in_idle, 0);
- atomic_set(&tctx->inflight_tracked, 0);
task->io_uring = tctx;
spin_lock_init(&tctx->task_lock);
INIT_WQ_LIST(&tctx->task_list);
@@ -9986,7 +9782,7 @@ static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
i = array_index_nospec(offset, ctx->nr_user_bufs);
if (ctx->user_bufs[i] != ctx->dummy_ubuf) {
- err = io_queue_rsrc_removal(ctx->buf_data, offset,
+ err = io_queue_rsrc_removal(ctx->buf_data, i,
ctx->rsrc_node, ctx->user_bufs[i]);
if (unlikely(err)) {
io_buffer_unmap(ctx, &imu);
@@ -10181,7 +9977,6 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
io_req_caches_free(ctx);
if (ctx->hash_map)
io_wq_put_hash(ctx->hash_map);
- io_free_napi_list(ctx);
kfree(ctx->cancel_hash);
kfree(ctx->dummy_ubuf);
kfree(ctx->io_buffers);
@@ -10604,7 +10399,7 @@ static __cold void io_uring_clean_tctx(struct io_uring_task *tctx)
static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
{
if (tracked)
- return atomic_read(&tctx->inflight_tracked);
+ return 0;
return percpu_counter_sum(&tctx->inflight);
}
@@ -10755,6 +10550,11 @@ static int io_ringfd_register(struct io_ring_ctx *ctx, void __user *__arg,
break;
}
+ if (reg.resv) {
+ ret = -EINVAL;
+ break;
+ }
+
if (reg.offset == -1U) {
start = 0;
end = IO_RINGFD_REG_MAX;
@@ -10801,7 +10601,7 @@ static int io_ringfd_unregister(struct io_ring_ctx *ctx, void __user *__arg,
ret = -EFAULT;
break;
}
- if (reg.offset >= IO_RINGFD_REG_MAX) {
+ if (reg.resv || reg.data || reg.offset >= IO_RINGFD_REG_MAX) {
ret = -EINVAL;
break;
}
@@ -10928,6 +10728,8 @@ static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz
return -EINVAL;
if (copy_from_user(&arg, argp, sizeof(arg)))
return -EFAULT;
+ if (arg.pad)
+ return -EINVAL;
*sig = u64_to_user_ptr(arg.sigmask);
*argsz = arg.sigmask_sz;
*ts = u64_to_user_ptr(arg.ts);
@@ -11409,7 +11211,8 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS |
- IORING_FEAT_RSRC_TAGS | IORING_FEAT_CQE_SKIP;
+ IORING_FEAT_RSRC_TAGS | IORING_FEAT_CQE_SKIP |
+ IORING_FEAT_LINKED_FILE;
if (copy_to_user(params, p, sizeof(*p))) {
ret = -EFAULT;
@@ -11620,8 +11423,6 @@ static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
__u32 tmp;
int err;
- if (up->resv)
- return -EINVAL;
if (check_add_overflow(up->offset, nr_args, &tmp))
return -EOVERFLOW;
err = io_rsrc_node_switch_start(ctx);
@@ -11647,6 +11448,8 @@ static int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
memset(&up, 0, sizeof(up));
if (copy_from_user(&up, arg, sizeof(struct io_uring_rsrc_update)))
return -EFAULT;
+ if (up.resv || up.resv2)
+ return -EINVAL;
return __io_register_rsrc_update(ctx, IORING_RSRC_FILE, &up, nr_args);
}
@@ -11659,7 +11462,7 @@ static int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
return -EINVAL;
if (copy_from_user(&up, arg, sizeof(up)))
return -EFAULT;
- if (!up.nr || up.resv)
+ if (!up.nr || up.resv || up.resv2)
return -EINVAL;
return __io_register_rsrc_update(ctx, type, &up, up.nr);
}
@@ -11707,7 +11510,15 @@ static __cold int io_register_iowq_aff(struct io_ring_ctx *ctx,
if (len > cpumask_size())
len = cpumask_size();
- if (copy_from_user(new_mask, arg, len)) {
+ if (in_compat_syscall()) {
+ ret = compat_get_bitmap(cpumask_bits(new_mask),
+ (const compat_ulong_t __user *)arg,
+ len * 8 /* CHAR_BIT */);
+ } else {
+ ret = copy_from_user(new_mask, arg, len);
+ }
+
+ if (ret) {
free_cpumask_var(new_mask);
return -EFAULT;
}