Diffstat (limited to 'io_uring/rw.c')
 io_uring/rw.c | 29 +++++++++++------------------
 1 file changed, 11 insertions(+), 18 deletions(-)
diff --git a/io_uring/rw.c b/io_uring/rw.c
index 5b2241a5813c..331af6bf4234 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -186,7 +186,7 @@ static void io_req_rw_cleanup(struct io_kiocb *req, unsigned int issue_flags)
* This is really a bug in the core code that does this, any issue
* path should assume that a successful (or -EIOCBQUEUED) return can
* mean that the underlying data can be gone at any time. But that
- * should be fixed seperately, and then this check could be killed.
+ * should be fixed separately, and then this check could be killed.
*/
if (!(req->flags & (REQ_F_REISSUE | REQ_F_REFCOUNT))) {
req->flags &= ~REQ_F_NEED_CLEANUP;
@@ -277,7 +277,6 @@ static int __io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
} else {
rw->kiocb.ki_ioprio = get_current_ioprio();
}
- rw->kiocb.dio_complete = NULL;
rw->kiocb.ki_flags = 0;
rw->kiocb.ki_write_stream = READ_ONCE(sqe->write_stream);
@@ -349,7 +348,7 @@ static int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe,
/*
* Have to do this validation here, as this is in io_read() rw->len
- * might have chanaged due to buffer selection
+ * might have changed due to buffer selection
*/
return io_iov_buffer_select_prep(req);
}
@@ -463,7 +462,10 @@ int io_read_mshot_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
void io_readv_writev_cleanup(struct io_kiocb *req)
{
+ struct io_async_rw *rw = req->async_data;
+
lockdep_assert_held(&req->ctx->uring_lock);
+ io_vec_free(&rw->vec);
io_rw_recycle(req, 0);
}
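
For reference, here is io_readv_writev_cleanup() as it reads once the hunk above is applied, reassembled from its context and '+' lines (a readability aid, not part of the patch):

	void io_readv_writev_cleanup(struct io_kiocb *req)
	{
		struct io_async_rw *rw = req->async_data;

		/* the existing assertion documents that the ring lock is held
		 * here, so freeing the cached iovec cannot race with recycling */
		lockdep_assert_held(&req->ctx->uring_lock);
		io_vec_free(&rw->vec);
		io_rw_recycle(req, 0);
	}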
@@ -564,16 +566,9 @@ static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
return res;
}
-void io_req_rw_complete(struct io_kiocb *req, io_tw_token_t tw)
+void io_req_rw_complete(struct io_tw_req tw_req, io_tw_token_t tw)
{
- struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
- struct kiocb *kiocb = &rw->kiocb;
-
- if ((kiocb->ki_flags & IOCB_DIO_CALLER_COMP) && kiocb->dio_complete) {
- long res = kiocb->dio_complete(rw->kiocb.private);
-
- io_req_set_res(req, io_fixup_rw_res(req, res), 0);
- }
+ struct io_kiocb *req = tw_req.req;
io_req_io_end(req);
@@ -581,7 +576,7 @@ void io_req_rw_complete(struct io_kiocb *req, io_tw_token_t tw)
req->cqe.flags |= io_put_kbuf(req, req->cqe.res, NULL);
io_req_rw_cleanup(req, 0);
- io_req_task_complete(req, tw);
+ io_req_task_complete(tw_req, tw);
}
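
For reference, the two hunks above change the same function. Reassembled (with the lines this diff does not show elided), io_req_rw_complete() now takes the task-work request token and unwraps the io_kiocb from it, the dio_complete branch having been dropped:

	void io_req_rw_complete(struct io_tw_req tw_req, io_tw_token_t tw)
	{
		struct io_kiocb *req = tw_req.req;

		io_req_io_end(req);

		/* ... lines between the two hunks, not shown in this diff ... */

		req->cqe.flags |= io_put_kbuf(req, req->cqe.res, NULL);
		io_req_rw_cleanup(req, 0);
		io_req_task_complete(tw_req, tw);
	}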
static void io_complete_rw(struct kiocb *kiocb, long res)
@@ -589,10 +584,8 @@ static void io_complete_rw(struct kiocb *kiocb, long res)
struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
struct io_kiocb *req = cmd_to_io_kiocb(rw);
- if (!kiocb->dio_complete || !(kiocb->ki_flags & IOCB_DIO_CALLER_COMP)) {
- __io_complete_rw_common(req, res);
- io_req_set_res(req, io_fixup_rw_res(req, res), 0);
- }
+ __io_complete_rw_common(req, res);
+ io_req_set_res(req, io_fixup_rw_res(req, res), 0);
req->io_task_work.func = io_req_rw_complete;
__io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE);
}
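
For reference, io_complete_rw() as it reads with the hunk above applied, reassembled from its context and '+' lines (not part of the patch). With the IOCB_DIO_CALLER_COMP fast path gone, every completion takes the common path before being punted to task work:

	static void io_complete_rw(struct kiocb *kiocb, long res)
	{
		struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
		struct io_kiocb *req = cmd_to_io_kiocb(rw);

		/* the result is now set unconditionally; the caller-completion
		 * (dio_complete) special case no longer exists */
		__io_complete_rw_common(req, res);
		io_req_set_res(req, io_fixup_rw_res(req, res), 0);
		req->io_task_work.func = io_req_rw_complete;
		__io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE);
	}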
@@ -1019,7 +1012,7 @@ static int __io_read(struct io_kiocb *req, struct io_br_sel *sel,
iov_iter_restore(&io->iter, &io->iter_state);
} while (ret > 0);
done:
- /* it's faster to check here then delegate to kfree */
+ /* it's faster to check here than delegate to kfree */
return ret;
}