Diffstat (limited to 'io_uring/io_uring.h')
-rw-r--r--	io_uring/io_uring.h	63

1 file changed, 29 insertions(+), 34 deletions(-)
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 46d9141d772a..a790c16854d3 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -17,6 +17,20 @@
 #include <trace/events/io_uring.h>
 #endif
 
+struct io_rings_layout {
+	/* size of CQ + headers + SQ offset array */
+	size_t rings_size;
+	size_t sq_size;
+
+	size_t sq_array_offset;
+};
+
+struct io_ctx_config {
+	struct io_uring_params p;
+	struct io_rings_layout layout;
+	struct io_uring_params __user *uptr;
+};
+
 #define IORING_FEAT_FLAGS	(IORING_FEAT_SINGLE_MMAP |\
 				 IORING_FEAT_NODROP |\
 				 IORING_FEAT_SUBMIT_STABLE |\
@@ -54,7 +68,8 @@
 				 IORING_SETUP_REGISTERED_FD_ONLY |\
 				 IORING_SETUP_NO_SQARRAY |\
 				 IORING_SETUP_HYBRID_IOPOLL |\
-				 IORING_SETUP_CQE_MIXED)
+				 IORING_SETUP_CQE_MIXED |\
+				 IORING_SETUP_SQE_MIXED)
 
 #define IORING_ENTER_FLAGS	(IORING_ENTER_GETEVENTS |\
 				 IORING_ENTER_SQ_WAKEUP |\
@@ -95,6 +110,11 @@ enum {
 	IOU_REQUEUE	= -3072,
 };
 
+struct io_defer_entry {
+	struct list_head	list;
+	struct io_kiocb		*req;
+};
+
 struct io_wait_queue {
 	struct wait_queue_entry wq;
 	struct io_ring_ctx *ctx;
@@ -128,11 +148,11 @@ static inline bool io_should_wake(struct io_wait_queue *iowq)
 #define IORING_MAX_ENTRIES	32768
 #define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)
 
-unsigned long rings_size(unsigned int flags, unsigned int sq_entries,
-			 unsigned int cq_entries, size_t *sq_offset);
-int io_uring_fill_params(unsigned entries, struct io_uring_params *p);
+int io_prepare_config(struct io_ctx_config *config);
+
 bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow, bool cqe32);
 int io_run_task_work_sig(struct io_ring_ctx *ctx);
+int io_run_local_work(struct io_ring_ctx *ctx, int min_events, int max_events);
 void io_req_defer_failed(struct io_kiocb *req, s32 res);
 bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
 void io_add_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
@@ -140,6 +160,7 @@ bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags);
 bool io_req_post_cqe32(struct io_kiocb *req, struct io_uring_cqe src_cqe[2]);
 void __io_commit_cqring_flush(struct io_ring_ctx *ctx);
 
+unsigned io_linked_nr(struct io_kiocb *req);
 void io_req_track_inflight(struct io_kiocb *req);
 struct file *io_file_get_normal(struct io_kiocb *req, int fd);
 struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
@@ -148,13 +169,13 @@ struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
 void __io_req_task_work_add(struct io_kiocb *req, unsigned flags);
 void io_req_task_work_add_remote(struct io_kiocb *req, unsigned flags);
 void io_req_task_queue(struct io_kiocb *req);
-void io_req_task_complete(struct io_kiocb *req, io_tw_token_t tw);
+void io_req_task_complete(struct io_tw_req tw_req, io_tw_token_t tw);
 void io_req_task_queue_fail(struct io_kiocb *req, int ret);
-void io_req_task_submit(struct io_kiocb *req, io_tw_token_t tw);
+void io_req_task_submit(struct io_tw_req tw_req, io_tw_token_t tw);
 struct llist_node *io_handle_tw_list(struct llist_node *node, unsigned int *count, unsigned int max_entries);
 struct llist_node *tctx_task_work_run(struct io_uring_task *tctx, unsigned int max_entries, unsigned int *count);
 void tctx_task_work(struct callback_head *cb);
-__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
+__cold void io_uring_drop_tctx_refs(struct task_struct *task);
 
 int io_ring_add_registered_file(struct io_uring_task *tctx, struct file *file,
 				int start, int end);
@@ -163,6 +184,7 @@ void io_req_queue_iowq(struct io_kiocb *req);
 int io_poll_issue(struct io_kiocb *req, io_tw_token_t tw);
 int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
 int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
+__cold void io_iopoll_try_reap_events(struct io_ring_ctx *ctx);
 void __io_submit_flush_completions(struct io_ring_ctx *ctx);
 
 struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
@@ -173,9 +195,6 @@ void io_queue_next(struct io_kiocb *req);
 void io_task_refs_refill(struct io_uring_task *tctx);
 bool __io_alloc_req_refill(struct io_ring_ctx *ctx);
 
-bool io_match_task_safe(struct io_kiocb *head, struct io_uring_task *tctx,
-			bool cancel_all);
-
 void io_activate_pollwq(struct io_ring_ctx *ctx);
 
 static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
@@ -558,19 +577,6 @@ static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
 		      ctx->submitter_task == current);
 }
 
-/*
- * Terminate the request if either of these conditions are true:
- *
- * 1) It's being executed by the original task, but that task is marked
- *    with PF_EXITING as it's exiting.
- * 2) PF_KTHREAD is set, in which case the invoker of the task_work is
- *    our fallback task_work.
- */
-static inline bool io_should_terminate_tw(struct io_ring_ctx *ctx)
-{
-	return (current->flags & (PF_KTHREAD | PF_EXITING)) || percpu_ref_is_dying(&ctx->refs);
-}
-
 static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
 {
 	io_req_set_res(req, res, 0);
@@ -578,17 +584,6 @@ static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
 	io_req_task_work_add(req);
 }
 
-/*
- * IORING_SETUP_SQE128 contexts allocate twice the normal SQE size for each
- * slot.
- */
-static inline size_t uring_sqe_size(struct io_ring_ctx *ctx)
-{
-	if (ctx->flags & IORING_SETUP_SQE128)
-		return 2 * sizeof(struct io_uring_sqe);
-	return sizeof(struct io_uring_sqe);
-}
-
 static inline bool io_file_can_poll(struct io_kiocb *req)
 {
 	if (req->flags & REQ_F_CAN_POLL)
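The most visible change above is the collapse of rings_size() and io_uring_fill_params() into a single io_prepare_config() operating on the new struct io_ctx_config. A minimal caller-side sketch follows; the copy_from_user() framing and the io_uring_setup_sketch() name are assumptions for illustration, and only struct io_ctx_config, struct io_rings_layout and io_prepare_config() come from this patch:

/* Sketch only -- kernel context assumed (linux/uaccess.h for copy_from_user). */
static int io_uring_setup_sketch(struct io_uring_params __user *uparams)
{
	struct io_ctx_config config = { .uptr = uparams };
	int ret;

	if (copy_from_user(&config.p, uparams, sizeof(config.p)))
		return -EFAULT;

	/*
	 * One call now validates the flags and fills config.layout with
	 * the geometry the removed rings_size()/io_uring_fill_params()
	 * pair used to compute separately.
	 */
	ret = io_prepare_config(&config);
	if (ret)
		return ret;

	/*
	 * Per the struct comment, layout.rings_size spans the CQ, the
	 * shared headers and the SQ offset array; layout.sq_array_offset
	 * locates that offset array within the region, and layout.sq_size
	 * sizes the separately mapped SQE array.
	 */
	return 0;
}

Keeping the user pointer in config.uptr suggests the copy-back of the final params can also move behind the same interface.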

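io_linked_nr() is newly declared here, but its body is not part of this header change. Given the existing io_kiocb ->link chain, a plausible reading is a simple chain count; the sketch below is an assumption, not the patch's implementation:

/* Sketch only: count a request plus everything linked behind it. */
static unsigned io_linked_nr_sketch(struct io_kiocb *req)
{
	struct io_kiocb *cur;
	unsigned nr = 0;

	for (cur = req; cur; cur = cur->link)
		nr++;
	return nr;
}

A count like this is presumably what the submit or completion path needs when a linked chain must be accounted for as a unit, e.g. when reserving CQE space for the whole chain.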