author	Linus Torvalds <torvalds@linux-foundation.org>	2022-12-13 10:40:31 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2022-12-13 10:40:31 -0800
commit	96f7e448b9f4546ffd0356ffceb2b9586777f316 (patch)
tree	099ba6679727c3d5d6e2e575ac994cf07019f68c /io_uring/net.c
parent	54e60e505d6144a22c787b5be1fdce996a27be1b (diff)
parent	761c61c15903db41343532882b0443addb8c2faf (diff)
Merge tag 'for-6.2/io_uring-next-2022-12-08' of git://git.kernel.dk/linux
Pull io_uring updates part two from Jens Axboe:

 - Misc fixes (me, Lin)

 - Series from Pavel extending the single task exclusive ring mode,
   yielding nice improvements for the common case of having a single
   ring per thread (Pavel)

 - Cleanup for MSG_RING, removing our IOPOLL hack (Pavel)

 - Further poll cleanups and fixes (Pavel)

 - Misc cleanups and fixes (Pavel)

* tag 'for-6.2/io_uring-next-2022-12-08' of git://git.kernel.dk/linux: (22 commits)
  io_uring/msg_ring: flag target ring as having task_work, if needed
  io_uring: skip spinlocking for ->task_complete
  io_uring: do msg_ring in target task via tw
  io_uring: extract a io_msg_install_complete helper
  io_uring: get rid of double locking
  io_uring: never run tw and fallback in parallel
  io_uring: use tw for putting rsrc
  io_uring: force multishot CQEs into task context
  io_uring: complete all requests in task context
  io_uring: don't check overflow flush failures
  io_uring: skip overflow CQE posting for dying ring
  io_uring: improve io_double_lock_ctx fail handling
  io_uring: dont remove file from msg_ring reqs
  io_uring: reshuffle issue_flags
  io_uring: don't reinstall quiesce node for each tw
  io_uring: improve rsrc quiesce refs checks
  io_uring: don't raw spin unlock to match cq_lock
  io_uring: combine poll tw handlers
  io_uring: improve poll warning handling
  io_uring: remove ctx variable in io_poll_check_events
  ...
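The ->task_complete mode this pull refers to is what userspace opts into with single-issuer rings. As a rough illustration (not part of this merge, and assuming liburing is available), creating a ring in that mode looks roughly like this; both setup flags are existing uapi, only the surrounding program is a sketch:

	#include <liburing.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		struct io_uring ring;
		int ret;

		/*
		 * IORING_SETUP_SINGLE_ISSUER + IORING_SETUP_DEFER_TASKRUN
		 * restrict submission and task_work to one task, which is
		 * what lets the kernel set ctx->task_complete and skip
		 * spinlocking on the completion side.
		 */
		ret = io_uring_queue_init(8, &ring, IORING_SETUP_SINGLE_ISSUER |
						    IORING_SETUP_DEFER_TASKRUN);
		if (ret < 0) {
			fprintf(stderr, "queue_init: %s\n", strerror(-ret));
			return 1;
		}
		io_uring_queue_exit(&ring);
		return 0;
	}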
Diffstat (limited to 'io_uring/net.c')
-rw-r--r--	io_uring/net.c	21
1 file changed, 21 insertions(+), 0 deletions(-)
diff --git a/io_uring/net.c b/io_uring/net.c
index cb831326ea5b..5229976cb582 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -67,6 +67,19 @@ struct io_sr_msg {
 	struct io_kiocb *notif;
 };
 
+static inline bool io_check_multishot(struct io_kiocb *req,
+				      unsigned int issue_flags)
+{
+	/*
+	 * When ->locked_cq is set we only allow to post CQEs from the original
+	 * task context. Usual request completions will be handled in other
+	 * generic paths but multipoll may decide to post extra cqes.
+	 */
+	return !(issue_flags & IO_URING_F_IOWQ) ||
+	       !(issue_flags & IO_URING_F_MULTISHOT) ||
+	       !req->ctx->task_complete;
+}
+
 int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
@@ -730,6 +743,9 @@ int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
 	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
 		return io_setup_async_msg(req, kmsg, issue_flags);
 
+	if (!io_check_multishot(req, issue_flags))
+		return io_setup_async_msg(req, kmsg, issue_flags);
+
 retry_multishot:
 	if (io_do_buffer_select(req)) {
 		void __user *buf;
@@ -829,6 +845,9 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags)
 	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
 		return -EAGAIN;
 
+	if (!io_check_multishot(req, issue_flags))
+		return -EAGAIN;
+
 	sock = sock_from_file(req->file);
 	if (unlikely(!sock))
 		return -ENOTSOCK;
@@ -1280,6 +1299,8 @@ int io_accept(struct io_kiocb *req, unsigned int issue_flags)
 	struct file *file;
 	int ret, fd;
 
+	if (!io_check_multishot(req, issue_flags))
+		return -EAGAIN;
 retry:
 	if (!fixed) {
 		fd = __get_unused_fd_flags(accept->flags, accept->nofile);
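The io_check_multishot() checks above bounce multishot requests that ended up in io-wq back to the original task (via -EAGAIN or io_setup_async_msg()), so the extra CQEs are only ever posted from task context. From userspace the behaviour is unchanged: a multishot consumer still just reads CQEs until IORING_CQE_F_MORE is cleared. A minimal sketch, assuming liburing (2.2+, for io_uring_prep_multishot_accept()) and an already-listening socket; accept_loop() is a hypothetical helper, not kernel or liburing API:

	#include <liburing.h>
	#include <errno.h>
	#include <stdio.h>

	/*
	 * Sketch only: submit one multishot accept and drain its CQEs.
	 * Each accepted connection posts a CQE; the request stays armed
	 * for as long as IORING_CQE_F_MORE is set in cqe->flags.
	 */
	static int accept_loop(struct io_uring *ring, int listen_fd)
	{
		struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
		struct io_uring_cqe *cqe;
		int ret;

		if (!sqe)
			return -EBUSY;
		io_uring_prep_multishot_accept(sqe, listen_fd, NULL, NULL, 0);
		io_uring_submit(ring);

		for (;;) {
			ret = io_uring_wait_cqe(ring, &cqe);
			if (ret < 0)
				return ret;
			if (cqe->res >= 0)
				printf("accepted fd %d\n", cqe->res);
			/* MORE cleared: the multishot request terminated */
			if (!(cqe->flags & IORING_CQE_F_MORE)) {
				ret = cqe->res < 0 ? cqe->res : 0;
				io_uring_cqe_seen(ring, cqe);
				return ret;
			}
			io_uring_cqe_seen(ring, cqe);
		}
	}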