-rw-r--r--	io_uring/io_uring.c	 4
-rw-r--r--	io_uring/io_uring.h	 8
-rw-r--r--	io_uring/net.c		47
-rw-r--r--	io_uring/poll.c		 3
-rw-r--r--	io_uring/rw.c		11
5 files changed, 29 insertions, 44 deletions
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index fa342be39158..6499d8e4d3d0 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1791,10 +1791,8 @@ int io_poll_issue(struct io_kiocb *req, io_tw_token_t tw)
 
 	ret = __io_issue_sqe(req, issue_flags, &io_issue_defs[req->opcode]);
 
+	WARN_ON_ONCE(ret == IOU_ISSUE_SKIP_COMPLETE);
 	WARN_ON_ONCE(ret == IOU_OK);
-
-	if (ret == IOU_ISSUE_SKIP_COMPLETE)
-		ret = 0;
 	return ret;
 }
 
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index daf0e3b740ee..3409740f6417 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -23,6 +23,14 @@ enum {
 	IOU_ISSUE_SKIP_COMPLETE	= -EIOCBQUEUED,
 
 	/*
+	 * The request has more work to do and should be retried. io_uring will
+	 * attempt to wait on the file for eligible opcodes, but otherwise
+	 * it'll be handed to iowq for blocking execution. It works for normal
+	 * requests as well as for the multi shot mode.
+	 */
+	IOU_RETRY		= -EAGAIN,
+
+	/*
 	 * Requeue the task_work to restart operations on this request. The
 	 * actual value isn't important, should just be not an otherwise
 	 * valid error code, yet less than -MAX_ERRNO and valid internally.
diff --git a/io_uring/net.c b/io_uring/net.c
index 34a28689ec99..d9befb6fb8a7 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -898,8 +898,7 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
 	 */
 	if ((req->flags & REQ_F_APOLL_MULTISHOT) && !mshot_finished &&
 	    io_req_post_cqe(req, *ret, cflags | IORING_CQE_F_MORE)) {
-		int mshot_retry_ret = IOU_ISSUE_SKIP_COMPLETE;
-
+		*ret = IOU_RETRY;
 		io_mshot_prep_retry(req, kmsg);
 		/* Known not-empty or unknown state, retry */
 		if (cflags & IORING_CQE_F_SOCK_NONEMPTY || kmsg->msg.msg_inq < 0) {
@@ -907,12 +906,9 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
 			return false;
 		/* mshot retries exceeded, force a requeue */
 		sr->nr_multishot_loops = 0;
-		mshot_retry_ret = IOU_REQUEUE;
+		if (issue_flags & IO_URING_F_MULTISHOT)
+			*ret = IOU_REQUEUE;
 	}
-	if (issue_flags & IO_URING_F_MULTISHOT)
-		*ret = mshot_retry_ret;
-	else
-		*ret = -EAGAIN;
 	return true;
 }
 
@@ -1070,16 +1066,15 @@ retry_multishot:
 
 	if (ret < min_ret) {
 		if (ret == -EAGAIN && force_nonblock) {
-			if (issue_flags & IO_URING_F_MULTISHOT) {
+			if (issue_flags & IO_URING_F_MULTISHOT)
 				io_kbuf_recycle(req, issue_flags);
-				return IOU_ISSUE_SKIP_COMPLETE;
-			}
-			return -EAGAIN;
+
+			return IOU_RETRY;
 		}
 		if (ret > 0 && io_net_retry(sock, flags)) {
 			sr->done_io += ret;
 			req->flags |= REQ_F_BL_NO_RECYCLE;
-			return -EAGAIN;
+			return IOU_RETRY;
 		}
 		if (ret == -ERESTARTSYS)
 			ret = -EINTR;
@@ -1207,12 +1202,10 @@ retry_multishot:
 	ret = sock_recvmsg(sock, &kmsg->msg, flags);
 	if (ret < min_ret) {
 		if (ret == -EAGAIN && force_nonblock) {
-			if (issue_flags & IO_URING_F_MULTISHOT) {
+			if (issue_flags & IO_URING_F_MULTISHOT)
 				io_kbuf_recycle(req, issue_flags);
-				return IOU_ISSUE_SKIP_COMPLETE;
-			}
-
-			return -EAGAIN;
+
+			return IOU_RETRY;
 		}
 		if (ret > 0 && io_net_retry(sock, flags)) {
 			sr->len -= ret;
@@ -1312,10 +1305,7 @@ int io_recvzc(struct io_kiocb *req, unsigned int issue_flags)
 			return IOU_STOP_MULTISHOT;
 		return IOU_OK;
 	}
-
-	if (issue_flags & IO_URING_F_MULTISHOT)
-		return IOU_ISSUE_SKIP_COMPLETE;
-	return -EAGAIN;
+	return IOU_RETRY;
 }
 
 void io_send_zc_cleanup(struct io_kiocb *req)
@@ -1692,16 +1682,9 @@ retry:
 			put_unused_fd(fd);
 		ret = PTR_ERR(file);
 		if (ret == -EAGAIN && force_nonblock &&
-		    !(accept->iou_flags & IORING_ACCEPT_DONTWAIT)) {
-			/*
-			 * if it's multishot and polled, we don't need to
-			 * return EAGAIN to arm the poll infra since it
-			 * has already been done
-			 */
-			if (issue_flags & IO_URING_F_MULTISHOT)
-				return IOU_ISSUE_SKIP_COMPLETE;
-			return ret;
-		}
+		    !(accept->iou_flags & IORING_ACCEPT_DONTWAIT))
+			return IOU_RETRY;
+
 		if (ret == -ERESTARTSYS)
 			ret = -EINTR;
 	} else if (!fixed) {
@@ -1720,9 +1703,7 @@ retry:
 	    io_req_post_cqe(req, ret, cflags | IORING_CQE_F_MORE)) {
 		if (cflags & IORING_CQE_F_SOCK_NONEMPTY || arg.is_empty == -1)
 			goto retry;
-		if (issue_flags & IO_URING_F_MULTISHOT)
-			return IOU_ISSUE_SKIP_COMPLETE;
-		return -EAGAIN;
+		return IOU_RETRY;
 	}
 
 	io_req_set_res(req, ret, cflags);
diff --git a/io_uring/poll.c b/io_uring/poll.c
index 176854882ba6..52e3c3e923f4 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -289,11 +289,12 @@ static int io_poll_check_events(struct io_kiocb *req, io_tw_token_t tw)
 			}
 		} else {
 			int ret = io_poll_issue(req, tw);
+
 			if (ret == IOU_STOP_MULTISHOT)
 				return IOU_POLL_REMOVE_POLL_USE_RES;
 			else if (ret == IOU_REQUEUE)
 				return IOU_POLL_REQUEUE;
-			if (ret < 0)
+			if (ret != IOU_RETRY && ret < 0)
 				return ret;
 		}
 
diff --git a/io_uring/rw.c b/io_uring/rw.c
index bf35599d1078..9a9c636defad 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -1068,9 +1068,7 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
 		 */
 		if (io_kbuf_recycle(req, issue_flags))
 			rw->len = 0;
-		if (issue_flags & IO_URING_F_MULTISHOT)
-			return IOU_ISSUE_SKIP_COMPLETE;
-		return -EAGAIN;
+		return IOU_RETRY;
 	} else if (ret <= 0) {
 		io_kbuf_recycle(req, issue_flags);
 		if (ret < 0)
@@ -1088,16 +1086,15 @@
 		rw->len = 0;	/* similarly to above, reset len to 0 */
 
 		if (io_req_post_cqe(req, ret, cflags | IORING_CQE_F_MORE)) {
-			if (issue_flags & IO_URING_F_MULTISHOT) {
+			if (issue_flags & IO_URING_F_MULTISHOT)
 				/*
 				 * Force retry, as we might have more data to
 				 * be read and otherwise it won't get retried
 				 * until (if ever) another poll is triggered.
 				 */
 				io_poll_multishot_retry(req);
-				return IOU_ISSUE_SKIP_COMPLETE;
-			}
-			return -EAGAIN;
+
+			return IOU_RETRY;
 		}
 	}
 
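To make the new convention concrete, the standalone sketch below models the return-code contract this patch introduces. It is a minimal illustration, not kernel code: demo_issue() and demo_dispatch() are hypothetical stand-ins for an opcode handler and the io_uring issue path, and the EIOCBQUEUED value is hardcoded because userspace errno.h does not define it. Only the IOU_RETRY == -EAGAIN aliasing and the "retry means poll-arm the file or punt to a worker" interpretation mirror the patch.

/* demo.c - sketch of the unified retry return code. Build: cc -o demo demo.c
 * All names are hypothetical; only the IOU_RETRY/-EAGAIN aliasing and its
 * interpretation follow the patch.
 */
#include <errno.h>
#include <stdio.h>

enum {
	IOU_OK			= 0,	/* request completed inline */
	IOU_ISSUE_SKIP_COMPLETE	= -529,	/* -EIOCBQUEUED: completion posted elsewhere */
	/*
	 * Request has more work to do and should be retried: wait on the
	 * file when the opcode is eligible, otherwise hand off to a
	 * blocking (iowq) worker. Used by normal and multishot requests.
	 */
	IOU_RETRY		= -EAGAIN,
};

/*
 * Before the patch, a handler had to pick its retry code based on the
 * caller: IOU_ISSUE_SKIP_COMPLETE when invoked from the poll path
 * (IO_URING_F_MULTISHOT set), plain -EAGAIN otherwise. After the patch
 * it returns IOU_RETRY unconditionally and the core decides what to do.
 */
static int demo_issue(int would_block)
{
	if (would_block)
		return IOU_RETRY;
	return IOU_OK;
}

static void demo_dispatch(int would_block)
{
	int ret = demo_issue(would_block);

	if (ret == IOU_RETRY)
		printf("retry: arm poll on the file or punt to a worker\n");
	else
		printf("completed inline (ret=%d)\n", ret);
}

int main(void)
{
	demo_dispatch(1);
	demo_dispatch(0);
	return 0;
}

The poll.c hunk completes the caller side of this contract: io_poll_check_events() now excludes the retry value from its error check (ret != IOU_RETRY && ret < 0), so a request that returns IOU_RETRY from the poll path is left armed for another wakeup rather than failed, and io_poll_issue() warns if the old IOU_ISSUE_SKIP_COMPLETE ever leaks out of a handler.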