Diffstat (limited to 'io_uring/io_uring.h')
-rw-r--r--   io_uring/io_uring.h   309
1 file changed, 236 insertions(+), 73 deletions(-)
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 726e6367af4d..a790c16854d3 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -8,42 +8,126 @@
#include <linux/poll.h>
#include <linux/io_uring_types.h>
#include <uapi/linux/eventpoll.h>
+#include "alloc_cache.h"
#include "io-wq.h"
#include "slist.h"
-#include "filetable.h"
+#include "opdef.h"
#ifndef CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
#endif
+struct io_rings_layout {
+ /* size of CQ + headers + SQ offset array */
+ size_t rings_size;
+ size_t sq_size;
+
+ size_t sq_array_offset;
+};
+
+struct io_ctx_config {
+ struct io_uring_params p;
+ struct io_rings_layout layout;
+ struct io_uring_params __user *uptr;
+};
+
+#define IORING_FEAT_FLAGS (IORING_FEAT_SINGLE_MMAP |\
+ IORING_FEAT_NODROP |\
+ IORING_FEAT_SUBMIT_STABLE |\
+ IORING_FEAT_RW_CUR_POS |\
+ IORING_FEAT_CUR_PERSONALITY |\
+ IORING_FEAT_FAST_POLL |\
+ IORING_FEAT_POLL_32BITS |\
+ IORING_FEAT_SQPOLL_NONFIXED |\
+ IORING_FEAT_EXT_ARG |\
+ IORING_FEAT_NATIVE_WORKERS |\
+ IORING_FEAT_RSRC_TAGS |\
+ IORING_FEAT_CQE_SKIP |\
+ IORING_FEAT_LINKED_FILE |\
+ IORING_FEAT_REG_REG_RING |\
+ IORING_FEAT_RECVSEND_BUNDLE |\
+ IORING_FEAT_MIN_TIMEOUT |\
+ IORING_FEAT_RW_ATTR |\
+ IORING_FEAT_NO_IOWAIT)
+
+#define IORING_SETUP_FLAGS (IORING_SETUP_IOPOLL |\
+ IORING_SETUP_SQPOLL |\
+ IORING_SETUP_SQ_AFF |\
+ IORING_SETUP_CQSIZE |\
+ IORING_SETUP_CLAMP |\
+ IORING_SETUP_ATTACH_WQ |\
+ IORING_SETUP_R_DISABLED |\
+ IORING_SETUP_SUBMIT_ALL |\
+ IORING_SETUP_COOP_TASKRUN |\
+ IORING_SETUP_TASKRUN_FLAG |\
+ IORING_SETUP_SQE128 |\
+ IORING_SETUP_CQE32 |\
+ IORING_SETUP_SINGLE_ISSUER |\
+ IORING_SETUP_DEFER_TASKRUN |\
+ IORING_SETUP_NO_MMAP |\
+ IORING_SETUP_REGISTERED_FD_ONLY |\
+ IORING_SETUP_NO_SQARRAY |\
+ IORING_SETUP_HYBRID_IOPOLL |\
+ IORING_SETUP_CQE_MIXED |\
+ IORING_SETUP_SQE_MIXED)
+
+#define IORING_ENTER_FLAGS (IORING_ENTER_GETEVENTS |\
+ IORING_ENTER_SQ_WAKEUP |\
+ IORING_ENTER_SQ_WAIT |\
+ IORING_ENTER_EXT_ARG |\
+ IORING_ENTER_REGISTERED_RING |\
+ IORING_ENTER_ABS_TIMER |\
+ IORING_ENTER_EXT_ARG_REG |\
+ IORING_ENTER_NO_IOWAIT)
+
+
+#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE |\
+ IOSQE_IO_DRAIN |\
+ IOSQE_IO_LINK |\
+ IOSQE_IO_HARDLINK |\
+ IOSQE_ASYNC |\
+ IOSQE_BUFFER_SELECT |\
+ IOSQE_CQE_SKIP_SUCCESS)
+
enum {
- IOU_OK = 0,
+ IOU_COMPLETE = 0,
+
IOU_ISSUE_SKIP_COMPLETE = -EIOCBQUEUED,
/*
+ * The request has more work to do and should be retried. io_uring will
+ * attempt to wait on the file for eligible opcodes, but otherwise
+ * it'll be handed to iowq for blocking execution. It works for normal
+ * requests as well as for multishot mode.
+ */
+ IOU_RETRY = -EAGAIN,
+
+ /*
* Requeue the task_work to restart operations on this request. The
* actual value isn't important, should just be not an otherwise
* valid error code, yet less than -MAX_ERRNO and valid internally.
*/
IOU_REQUEUE = -3072,
+};
- /*
- * Intended only when both IO_URING_F_MULTISHOT is passed
- * to indicate to the poll runner that multishot should be
- * removed and the result is set on req->cqe.res.
- */
- IOU_STOP_MULTISHOT = -ECANCELED,
+struct io_defer_entry {
+ struct list_head list;
+ struct io_kiocb *req;
};
struct io_wait_queue {
struct wait_queue_entry wq;
struct io_ring_ctx *ctx;
unsigned cq_tail;
+ unsigned cq_min_tail;
unsigned nr_timeouts;
+ int hit_timeout;
+ ktime_t min_timeout;
ktime_t timeout;
+ struct hrtimer t;
#ifdef CONFIG_NET_RX_BUSY_POLL
- unsigned int napi_busy_poll_to;
+ ktime_t napi_busy_poll_dt;
bool napi_prefer_busy_poll;
#endif
};
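
The IOU_* codes above form the return contract between an opcode's issue handler and the io_uring core. A minimal sketch of a hypothetical handler using them (io_foo_issue and do_foo_operation are made-up names; IO_URING_F_NONBLOCK, io_req_set_res, IOU_RETRY and IOU_COMPLETE are the real identifiers from this header):

static int io_foo_issue(struct io_kiocb *req, unsigned int issue_flags)
{
	int ret = do_foo_operation(req);	/* placeholder for the real work */

	/*
	 * Nonblocking attempt could not proceed: let the core retry, either
	 * by waiting on the file or by punting to io-wq, per IOU_RETRY above.
	 */
	if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
		return IOU_RETRY;

	io_req_set_res(req, ret, 0);
	return IOU_COMPLETE;	/* result is in req->cqe, the core posts the CQE */
}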
@@ -61,36 +145,46 @@ static inline bool io_should_wake(struct io_wait_queue *iowq)
return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
}
-bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow);
+#define IORING_MAX_ENTRIES 32768
+#define IORING_MAX_CQ_ENTRIES (2 * IORING_MAX_ENTRIES)
+
+int io_prepare_config(struct io_ctx_config *config);
+
+bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow, bool cqe32);
int io_run_task_work_sig(struct io_ring_ctx *ctx);
+int io_run_local_work(struct io_ring_ctx *ctx, int min_events, int max_events);
void io_req_defer_failed(struct io_kiocb *req, s32 res);
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
+void io_add_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags);
+bool io_req_post_cqe32(struct io_kiocb *req, struct io_uring_cqe src_cqe[2]);
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);
+unsigned io_linked_nr(struct io_kiocb *req);
+void io_req_track_inflight(struct io_kiocb *req);
struct file *io_file_get_normal(struct io_kiocb *req, int fd);
struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
unsigned issue_flags);
void __io_req_task_work_add(struct io_kiocb *req, unsigned flags);
-bool io_alloc_async_data(struct io_kiocb *req);
+void io_req_task_work_add_remote(struct io_kiocb *req, unsigned flags);
void io_req_task_queue(struct io_kiocb *req);
-void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts);
+void io_req_task_complete(struct io_tw_req tw_req, io_tw_token_t tw);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
-void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts);
+void io_req_task_submit(struct io_tw_req tw_req, io_tw_token_t tw);
struct llist_node *io_handle_tw_list(struct llist_node *node, unsigned int *count, unsigned int max_entries);
struct llist_node *tctx_task_work_run(struct io_uring_task *tctx, unsigned int max_entries, unsigned int *count);
void tctx_task_work(struct callback_head *cb);
-__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
-int io_uring_alloc_task_context(struct task_struct *task,
- struct io_ring_ctx *ctx);
+__cold void io_uring_drop_tctx_refs(struct task_struct *task);
int io_ring_add_registered_file(struct io_uring_task *tctx, struct file *file,
int start, int end);
+void io_req_queue_iowq(struct io_kiocb *req);
-int io_poll_issue(struct io_kiocb *req, struct io_tw_state *ts);
+int io_poll_issue(struct io_kiocb *req, io_tw_token_t tw);
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
+__cold void io_iopoll_try_reap_events(struct io_ring_ctx *ctx);
void __io_submit_flush_completions(struct io_ring_ctx *ctx);
struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
@@ -101,15 +195,6 @@ void io_queue_next(struct io_kiocb *req);
void io_task_refs_refill(struct io_uring_task *tctx);
bool __io_alloc_req_refill(struct io_ring_ctx *ctx);
-bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
- bool cancel_all);
-
-enum {
- IO_EVENTFD_OP_SIGNAL_BIT,
- IO_EVENTFD_OP_FREE_BIT,
-};
-
-void io_eventfd_ops(struct rcu_head *rcu);
void io_activate_pollwq(struct io_ring_ctx *ctx);
static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
@@ -117,6 +202,9 @@ static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
#if defined(CONFIG_PROVE_LOCKING)
lockdep_assert(in_task());
+ if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
+ lockdep_assert_held(&ctx->uring_lock);
+
if (ctx->flags & IORING_SETUP_IOPOLL) {
lockdep_assert_held(&ctx->uring_lock);
} else if (!ctx->task_complete) {
@@ -128,14 +216,17 @@ static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
* Not from an SQE, as those cannot be submitted, but via
* updating tagged resources.
*/
- if (ctx->submitter_task->flags & PF_EXITING)
- lockdep_assert(current_work());
- else
+ if (!percpu_ref_is_dying(&ctx->refs))
lockdep_assert(current == ctx->submitter_task);
}
#endif
}
+static inline bool io_is_compat(struct io_ring_ctx *ctx)
+{
+ return IS_ENABLED(CONFIG_COMPAT) && unlikely(ctx->compat);
+}
+
static inline void io_req_task_work_add(struct io_kiocb *req)
{
__io_req_task_work_add(req, 0);
@@ -153,50 +244,63 @@ static inline void io_submit_flush_completions(struct io_ring_ctx *ctx)
static inline bool io_get_cqe_overflow(struct io_ring_ctx *ctx,
struct io_uring_cqe **ret,
- bool overflow)
+ bool overflow, bool cqe32)
{
io_lockdep_assert_cq_locked(ctx);
- if (unlikely(ctx->cqe_cached >= ctx->cqe_sentinel)) {
- if (unlikely(!io_cqe_cache_refill(ctx, overflow)))
+ if (unlikely(ctx->cqe_sentinel - ctx->cqe_cached < (cqe32 + 1))) {
+ if (unlikely(!io_cqe_cache_refill(ctx, overflow, cqe32)))
return false;
}
*ret = ctx->cqe_cached;
ctx->cached_cq_tail++;
ctx->cqe_cached++;
- if (ctx->flags & IORING_SETUP_CQE32)
+ if (ctx->flags & IORING_SETUP_CQE32) {
ctx->cqe_cached++;
+ } else if (cqe32 && ctx->flags & IORING_SETUP_CQE_MIXED) {
+ ctx->cqe_cached++;
+ ctx->cached_cq_tail++;
+ }
+ WARN_ON_ONCE(ctx->cqe_cached > ctx->cqe_sentinel);
return true;
}
-static inline bool io_get_cqe(struct io_ring_ctx *ctx, struct io_uring_cqe **ret)
+static inline bool io_get_cqe(struct io_ring_ctx *ctx, struct io_uring_cqe **ret,
+ bool cqe32)
{
- return io_get_cqe_overflow(ctx, ret, false);
+ return io_get_cqe_overflow(ctx, ret, false, cqe32);
+}
+
+static inline bool io_defer_get_uncommited_cqe(struct io_ring_ctx *ctx,
+ struct io_uring_cqe **cqe_ret)
+{
+ io_lockdep_assert_cq_locked(ctx);
+
+ ctx->submit_state.cq_flush = true;
+ return io_get_cqe(ctx, cqe_ret, ctx->flags & IORING_SETUP_CQE_MIXED);
}
static __always_inline bool io_fill_cqe_req(struct io_ring_ctx *ctx,
struct io_kiocb *req)
{
+ bool is_cqe32 = req->cqe.flags & IORING_CQE_F_32;
struct io_uring_cqe *cqe;
/*
- * If we can't get a cq entry, userspace overflowed the
- * submission (by quite a lot). Increment the overflow count in
- * the ring.
+ * If we can't get a cq entry, userspace overflowed the submission
+ * (by quite a lot).
*/
- if (unlikely(!io_get_cqe(ctx, &cqe)))
+ if (unlikely(!io_get_cqe(ctx, &cqe, is_cqe32)))
return false;
- if (trace_io_uring_complete_enabled())
- trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
- req->cqe.res, req->cqe.flags,
- req->big_cqe.extra1, req->big_cqe.extra2);
-
memcpy(cqe, &req->cqe, sizeof(*cqe));
- if (ctx->flags & IORING_SETUP_CQE32) {
+ if (ctx->flags & IORING_SETUP_CQE32 || is_cqe32) {
memcpy(cqe->big_cqe, &req->big_cqe, sizeof(*cqe));
memset(&req->big_cqe, 0, sizeof(req->big_cqe));
}
+
+ if (trace_io_uring_complete_enabled())
+ trace_io_uring_complete(req->ctx, req, cqe);
return true;
}
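
On IORING_SETUP_CQE_MIXED rings, a completion carrying IORING_CQE_F_32 occupies two 16-byte CQ slots, which is why io_get_cqe_overflow() above advances both cqe_cached and cached_cq_tail twice for it. A consumer-side sketch of the same accounting, assuming a hypothetical handle_cqe() callback and that the reader honours the two-slot rule:

static void reap_mixed_cqes(struct io_uring_cqe *cqes, unsigned int mask,
			    unsigned int *head, unsigned int tail)
{
	while (*head != tail) {
		struct io_uring_cqe *cqe = &cqes[*head & mask];

		handle_cqe(cqe);
		/* a 32-byte CQE consumes the following 16-byte slot as well */
		*head += (cqe->flags & IORING_CQE_F_32) ? 2 : 1;
	}
}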
@@ -215,11 +319,56 @@ static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
req->cqe.flags = cflags;
}
+static inline u32 ctx_cqe32_flags(struct io_ring_ctx *ctx)
+{
+ if (ctx->flags & IORING_SETUP_CQE_MIXED)
+ return IORING_CQE_F_32;
+ return 0;
+}
+
+static inline void io_req_set_res32(struct io_kiocb *req, s32 res, u32 cflags,
+ __u64 extra1, __u64 extra2)
+{
+ req->cqe.res = res;
+ req->cqe.flags = cflags | ctx_cqe32_flags(req->ctx);
+ req->big_cqe.extra1 = extra1;
+ req->big_cqe.extra2 = extra2;
+}
+
+static inline void *io_uring_alloc_async_data(struct io_alloc_cache *cache,
+ struct io_kiocb *req)
+{
+ if (cache) {
+ req->async_data = io_cache_alloc(cache, GFP_KERNEL);
+ } else {
+ const struct io_issue_def *def = &io_issue_defs[req->opcode];
+
+ WARN_ON_ONCE(!def->async_size);
+ req->async_data = kmalloc(def->async_size, GFP_KERNEL);
+ }
+ if (req->async_data)
+ req->flags |= REQ_F_ASYNC_DATA;
+ return req->async_data;
+}
+
static inline bool req_has_async_data(struct io_kiocb *req)
{
return req->flags & REQ_F_ASYNC_DATA;
}
+static inline void io_req_async_data_clear(struct io_kiocb *req,
+ io_req_flags_t extra_flags)
+{
+ req->flags &= ~(REQ_F_ASYNC_DATA|extra_flags);
+ req->async_data = NULL;
+}
+
+static inline void io_req_async_data_free(struct io_kiocb *req)
+{
+ kfree(req->async_data);
+ io_req_async_data_clear(req, 0);
+}
+
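
io_uring_alloc_async_data() either takes an entry from a per-context alloc cache or falls back to kmalloc() sized by the opcode's async_size, and tags the request with REQ_F_ASYNC_DATA. A sketch of typical prep/cleanup usage, assuming a hypothetical opcode whose io_issue_defs entry sets async_size to sizeof(struct io_foo_async) and which uses the kmalloc() fallback (NULL cache):

struct io_foo_async {			/* hypothetical per-request async state */
	struct iovec fast_iov;
};

static int io_foo_prep(struct io_kiocb *req)
{
	struct io_foo_async *fa;

	fa = io_uring_alloc_async_data(NULL, req);	/* kmalloc() path */
	if (unlikely(!fa))
		return -ENOMEM;
	fa->fast_iov.iov_len = 0;
	return 0;
}

static void io_foo_cleanup(struct io_kiocb *req)
{
	/* kfree()s the allocation and clears REQ_F_ASYNC_DATA */
	if (req_has_async_data(req))
		io_req_async_data_free(req);
}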
static inline void io_put_file(struct io_kiocb *req)
{
if (!(req->flags & REQ_F_FIXED_FILE) && req->file)
@@ -254,11 +403,22 @@ static inline void io_commit_cqring(struct io_ring_ctx *ctx)
smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}
+static inline void __io_wq_wake(struct wait_queue_head *wq)
+{
+ /*
+ * Pass in EPOLLIN|EPOLL_URING_WAKE as the poll wakeup key. The latter is
+ * set in the mask so that if we recurse back into our own poll
+ * waitqueue handlers, we know we have a dependency between eventfd or
+ * epoll and should terminate multishot poll at that point.
+ */
+ if (wq_has_sleeper(wq))
+ __wake_up(wq, TASK_NORMAL, 0, poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
+}
+
static inline void io_poll_wq_wake(struct io_ring_ctx *ctx)
{
- if (wq_has_sleeper(&ctx->poll_wq))
- __wake_up(&ctx->poll_wq, TASK_NORMAL, 0,
- poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
+ __io_wq_wake(&ctx->poll_wq);
}
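
__io_wq_wake() tags the wakeup key with EPOLL_URING_WAKE precisely so that io_uring's own poll handlers can tell an io_uring-originated wakeup apart from a regular one. A hypothetical waitqueue callback honouring that key (foo_poll_wake and stop_multishot are made-up names, not the real io_uring poll code):

static int foo_poll_wake(struct wait_queue_entry *wait, unsigned int mode,
			 int sync, void *key)
{
	if (key_to_poll(key) & EPOLL_URING_WAKE) {
		/*
		 * Woken by io_uring itself via eventfd/epoll: stop rearming
		 * multishot poll here to break the recursion.
		 */
		return stop_multishot(wait);
	}
	return default_wake_function(wait, mode, sync, key);
}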
static inline void io_cqring_wake(struct io_ring_ctx *ctx)
@@ -267,22 +427,23 @@ static inline void io_cqring_wake(struct io_ring_ctx *ctx)
* Trigger waitqueue handler on all waiters on our waitqueue. This
* won't necessarily wake up all the tasks, io_should_wake() will make
* that decision.
- *
- * Pass in EPOLLIN|EPOLL_URING_WAKE as the poll wakeup key. The latter
- * set in the mask so that if we recurse back into our own poll
- * waitqueue handlers, we know we have a dependency between eventfd or
- * epoll and should terminate multishot poll at that point.
*/
- if (wq_has_sleeper(&ctx->cq_wait))
- __wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
- poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
+
+ __io_wq_wake(&ctx->cq_wait);
}
static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
struct io_rings *r = ctx->rings;
- return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
+ /*
+ * SQPOLL must use the actual sqring head, as using the cached_sq_head
+ * is race prone if the SQPOLL thread has grabbed entries but not yet
+ * committed them to the ring. For !SQPOLL, this doesn't matter, but
+ * since this helper is just used for SQPOLL sqring waits (or POLLOUT),
+ * just read the actual sqring head unconditionally.
+ */
+ return READ_ONCE(r->sq.tail) - READ_ONCE(r->sq.head) == ctx->sq_entries;
}
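
The helper above deliberately reads the published sq.head rather than ctx->cached_sq_head, since SQPOLL advances the cached head while grabbing entries before committing them back. A standalone sketch of the same fullness check expressed over the shared ring fields, assuming the usual acquire/release pairing on sq.head and a locally tracked sqe_tail (hypothetical helper, not part of this header):

static bool sq_ring_full(struct io_rings *rings, unsigned int sqe_tail,
			 unsigned int ring_entries)
{
	/* acquire pairs with the kernel's release store when committing sq.head */
	unsigned int head = smp_load_acquire(&rings->sq.head);

	return sqe_tail - head == ring_entries;
}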
static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
@@ -318,6 +479,7 @@ static inline int io_run_task_work(void)
if (current->io_uring) {
unsigned int count = 0;
+ __set_current_state(TASK_RUNNING);
tctx_task_work_run(current->io_uring, UINT_MAX, &count);
if (count)
ret = true;
@@ -332,12 +494,17 @@ static inline int io_run_task_work(void)
return ret;
}
+static inline bool io_local_work_pending(struct io_ring_ctx *ctx)
+{
+ return !llist_empty(&ctx->work_llist) || !llist_empty(&ctx->retry_llist);
+}
+
static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
{
- return task_work_pending(current) || !llist_empty(&ctx->work_llist);
+ return task_work_pending(current) || io_local_work_pending(ctx);
}
-static inline void io_tw_lock(struct io_ring_ctx *ctx, struct io_tw_state *ts)
+static inline void io_tw_lock(struct io_ring_ctx *ctx, io_tw_token_t tw)
{
lockdep_assert_held(&ctx->uring_lock);
}
@@ -359,7 +526,7 @@ static inline void io_req_complete_defer(struct io_kiocb *req)
static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
- if (unlikely(ctx->off_timeout_used || ctx->drain_active ||
+ if (unlikely(ctx->off_timeout_used ||
ctx->has_evfd || ctx->poll_activated))
__io_commit_cqring_flush(ctx);
}
@@ -379,7 +546,6 @@ static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
}
extern struct kmem_cache *req_cachep;
-extern struct kmem_cache *io_buf_cachep;
static inline struct io_kiocb *io_extract_req(struct io_ring_ctx *ctx)
{
@@ -418,17 +584,6 @@ static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
io_req_task_work_add(req);
}
-/*
- * IORING_SETUP_SQE128 contexts allocate twice the normal SQE size for each
- * slot.
- */
-static inline size_t uring_sqe_size(struct io_ring_ctx *ctx)
-{
- if (ctx->flags & IORING_SETUP_SQE128)
- return 2 * sizeof(struct io_uring_sqe);
- return sizeof(struct io_uring_sqe);
-}
-
static inline bool io_file_can_poll(struct io_kiocb *req)
{
if (req->flags & REQ_F_CAN_POLL)
@@ -440,6 +595,14 @@ static inline bool io_file_can_poll(struct io_kiocb *req)
return false;
}
+static inline ktime_t io_get_time(struct io_ring_ctx *ctx)
+{
+ if (ctx->clockid == CLOCK_MONOTONIC)
+ return ktime_get();
+
+ return ktime_get_with_offset(ctx->clock_offset);
+}
+
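
io_get_time() returns the ring's notion of "now" in whichever clock ctx->clockid selects, falling back to the stored clock offset otherwise. A small sketch of how a wait path might turn a relative timeout into an absolute deadline with it (compute_deadline is a made-up helper):

static ktime_t compute_deadline(struct io_ring_ctx *ctx, u64 timeout_ns)
{
	/* absolute deadline in the ring's configured clock */
	return ktime_add_ns(io_get_time(ctx), timeout_ns);
}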
enum {
IO_CHECK_CQ_OVERFLOW_BIT,
IO_CHECK_CQ_DROPPED_BIT,
@@ -448,6 +611,6 @@ enum {
static inline bool io_has_work(struct io_ring_ctx *ctx)
{
return test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq) ||
- !llist_empty(&ctx->work_llist);
+ io_local_work_pending(ctx);
}
#endif