Diffstat (limited to 'io_uring/msg_ring.c')
-rw-r--r--  io_uring/msg_ring.c  53
1 file changed, 14 insertions(+), 39 deletions(-)
diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c
index 333c220d322a..7063ea7964e7 100644
--- a/io_uring/msg_ring.c
+++ b/io_uring/msg_ring.c
@@ -11,7 +11,6 @@
#include "io_uring.h"
#include "rsrc.h"
#include "filetable.h"
-#include "alloc_cache.h"
#include "msg_ring.h"
/* All valid masks for MSG_RING */
@@ -38,8 +37,8 @@ static void io_double_unlock_ctx(struct io_ring_ctx *octx)
mutex_unlock(&octx->uring_lock);
}
-static int io_double_lock_ctx(struct io_ring_ctx *octx,
- unsigned int issue_flags)
+static int io_lock_external_ctx(struct io_ring_ctx *octx,
+ unsigned int issue_flags)
{
/*
* To ensure proper ordering between the two ctxs, we can only
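The rename makes the locking direction explicit: the helper takes the uring_lock of the other (external) ring while the caller may already hold its own ring's lock. As the comment above indicates, ordering between the two locks is handled by never blocking on the second one. A minimal standalone sketch of that trylock pattern, with hypothetical names rather than the kernel implementation:

    #include <linux/errno.h>
    #include <linux/mutex.h>

    /*
     * Sketch: acquire a second mutex while possibly holding a first one.
     * Blocking here could ABBA-deadlock against a task locking in the
     * opposite order, so only a trylock is attempted and the caller is
     * told to retry (-EAGAIN) from a context where blocking is safe.
     */
    static int lock_second_ring(struct mutex *second)
    {
            if (!mutex_trylock(second))
                    return -EAGAIN;
            return 0;
    }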
@@ -71,58 +70,41 @@ static inline bool io_msg_need_remote(struct io_ring_ctx *target_ctx)
return target_ctx->task_complete;
}
-static void io_msg_tw_complete(struct io_kiocb *req, struct io_tw_state *ts)
+static void io_msg_tw_complete(struct io_tw_req tw_req, io_tw_token_t tw)
{
+ struct io_kiocb *req = tw_req.req;
struct io_ring_ctx *ctx = req->ctx;
io_add_aux_cqe(ctx, req->cqe.user_data, req->cqe.res, req->cqe.flags);
- if (spin_trylock(&ctx->msg_lock)) {
- if (io_alloc_cache_put(&ctx->msg_cache, req))
- req = NULL;
- spin_unlock(&ctx->msg_lock);
- }
- if (req)
- kmem_cache_free(req_cachep, req);
+ kfree_rcu(req, rcu_head);
percpu_ref_put(&ctx->refs);
}
static int io_msg_remote_post(struct io_ring_ctx *ctx, struct io_kiocb *req,
int res, u32 cflags, u64 user_data)
{
- req->tctx = READ_ONCE(ctx->submitter_task->io_uring);
- if (!req->tctx) {
- kmem_cache_free(req_cachep, req);
+ if (!READ_ONCE(ctx->submitter_task)) {
+ kfree_rcu(req, rcu_head);
return -EOWNERDEAD;
}
+ req->opcode = IORING_OP_NOP;
req->cqe.user_data = user_data;
io_req_set_res(req, res, cflags);
percpu_ref_get(&ctx->refs);
req->ctx = ctx;
+ req->tctx = NULL;
req->io_task_work.func = io_msg_tw_complete;
- io_req_task_work_add_remote(req, ctx, IOU_F_TWQ_LAZY_WAKE);
+ io_req_task_work_add_remote(req, IOU_F_TWQ_LAZY_WAKE);
return 0;
}
-static struct io_kiocb *io_msg_get_kiocb(struct io_ring_ctx *ctx)
-{
- struct io_kiocb *req = NULL;
-
- if (spin_trylock(&ctx->msg_lock)) {
- req = io_alloc_cache_get(&ctx->msg_cache);
- spin_unlock(&ctx->msg_lock);
- if (req)
- return req;
- }
- return kmem_cache_alloc(req_cachep, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
-}
-
static int io_msg_data_remote(struct io_ring_ctx *target_ctx,
struct io_msg *msg)
{
struct io_kiocb *target;
u32 flags = 0;
- target = io_msg_get_kiocb(target_ctx);
+ target = kmem_cache_alloc(req_cachep, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
if (unlikely(!target))
return -ENOMEM;
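With the alloc cache gone, requests for the target ring come straight from the slab allocator above, and the completion side frees them with kfree_rcu() rather than recycling them under a trylock'd msg_lock. A minimal sketch of the same allocate-then-RCU-defer-free pattern, with a hypothetical struct standing in for io_kiocb (which embeds a struct rcu_head, as the kfree_rcu(req, rcu_head) call implies):

    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct msg_obj {
            u64 user_data;
            struct rcu_head rcu_head;       /* required by kfree_rcu() */
    };

    static struct msg_obj *msg_obj_alloc(void)
    {
            /* __GFP_NOWARN: allocation failure is handled by the caller;
             * __GFP_ZERO: hand back a fully zeroed object */
            return kmalloc(sizeof(struct msg_obj),
                           GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
    }

    static void msg_obj_free(struct msg_obj *obj)
    {
            /* Defer the actual kfree() until after an RCU grace period,
             * so concurrent RCU readers never see the memory reused */
            kfree_rcu(obj, rcu_head);
    }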
@@ -154,7 +136,7 @@ static int __io_msg_ring_data(struct io_ring_ctx *target_ctx,
ret = -EOVERFLOW;
if (target_ctx->flags & IORING_SETUP_IOPOLL) {
- if (unlikely(io_double_lock_ctx(target_ctx, issue_flags)))
+ if (unlikely(io_lock_external_ctx(target_ctx, issue_flags)))
return -EAGAIN;
}
if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, flags))
@@ -199,7 +181,7 @@ static int io_msg_install_complete(struct io_kiocb *req, unsigned int issue_flags)
struct file *src_file = msg->src_file;
int ret;
- if (unlikely(io_double_lock_ctx(target_ctx, issue_flags)))
+ if (unlikely(io_lock_external_ctx(target_ctx, issue_flags)))
return -EAGAIN;
ret = __io_fixed_fd_install(target_ctx, src_file, msg->dst_fd);
@@ -327,7 +309,7 @@ done:
req_set_fail(req);
}
io_req_set_res(req, ret, 0);
- return IOU_OK;
+ return IOU_COMPLETE;
}
int io_uring_sync_msg_ring(struct io_uring_sqe *sqe)
@@ -354,10 +336,3 @@ int io_uring_sync_msg_ring(struct io_uring_sqe *sqe)
return __io_msg_ring_data(fd_file(f)->private_data,
&io_msg, IO_URING_F_UNLOCKED);
}
-
-void io_msg_cache_free(const void *entry)
-{
- struct io_kiocb *req = (struct io_kiocb *) entry;
-
- kmem_cache_free(req_cachep, req);
-}
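The userspace-visible behaviour of MSG_RING is unchanged by this cleanup: an SQE submitted on one ring posts a CQE on another. For reference, a small liburing example using the standard io_uring_prep_msg_ring() API (ring setup and teardown elided):

    #include <liburing.h>

    /*
     * Send a message from ring 'src' to ring 'dst'. The receiver sees
     * 'len' in cqe->res and 'data' in cqe->user_data.
     */
    static int send_ring_msg(struct io_uring *src, struct io_uring *dst)
    {
            struct io_uring_sqe *sqe = io_uring_get_sqe(src);

            if (!sqe)
                    return -EBUSY;
            io_uring_prep_msg_ring(sqe, dst->ring_fd, 42 /* len */,
                                   0xcafe /* data */, 0 /* flags */);
            /* returns the number of SQEs submitted, or -errno */
            return io_uring_submit(src);
    }

Roughly speaking, if the target ring requires completions to be posted from its owning task (as with IORING_SETUP_DEFER_TASKRUN), the kernel takes the remote-post task-work path shown in this diff; otherwise the CQE can be posted directly.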