author     Pavel Begunkov <asml.silence@gmail.com>    2022-11-04 10:59:46 +0000
committer  Jens Axboe <axboe@kernel.dk>               2022-11-21 07:38:31 -0700
commit     42385b02baad0df55474b7f36dc13e0d4ffd0cc0
tree       4a03a31927cb0927c452e40b2a2d8741d023b1e8
parent     40725d1b960f19a11a1ebd1ab537844ebf39347c
io_uring/net: move mm accounting to a slower path
We can also move mm accounting to the extended callbacks. It removes a
few cycles from the hot path, including skipping one function call and
setting io_req_task_complete as a callback directly. For user-backed I/O
it shouldn't make any difference, considering atomic mm accounting and
page pinning.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/1062f270273ad11c1b7b45ec59a6a317533d5e64.1667557923.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
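The idea behind the patch can be illustrated with a minimal, self-contained C sketch. All names below (notif, basic_complete, extended_complete, notif_set_extended, notif_account_mem) are hypothetical stand-ins for the kernel's io_kiocb/io_notif_data machinery, not the actual implementation: the default completion callback stays as small as possible, and a request is switched to the slower, extended callback only when it has per-request state (accounted pinned pages) to undo.

#include <stdatomic.h>
#include <stddef.h>

/* Hypothetical stand-ins: the real code hangs this state off
 * struct io_kiocb / struct io_notif_data. */
struct user_struct {
	atomic_ulong locked_pages;	/* pages charged against RLIMIT_MEMLOCK */
};

struct notif {
	void (*complete)(struct notif *);	/* completion callback, hot path */
	struct user_struct *user;
	unsigned long account_pages;		/* pages charged for this request */
};

/* Hot path: nothing to undo, just post the completion. */
static void basic_complete(struct notif *n)
{
	(void)n;
	/* post CQE ... */
}

/* Slow path: also release the memory accounted at submission time. */
static void extended_complete(struct notif *n)
{
	if (n->account_pages) {
		atomic_fetch_sub(&n->user->locked_pages, n->account_pages);
		n->account_pages = 0;
	}
	basic_complete(n);
}

/* Analogue of io_notif_set_extended(): opt this request into the
 * slower callback because completion will need per-request work. */
static void notif_set_extended(struct notif *n)
{
	n->complete = extended_complete;
}

/* Analogue of io_notif_account_mem(): charge pages at submission.
 * Simplified; the kernel rounds by PAGE_SIZE and checks limits. */
static int notif_account_mem(struct notif *n, size_t len)
{
	unsigned long pages = (len + 4095) / 4096;

	atomic_fetch_add(&n->user->locked_pages, pages);
	n->account_pages += pages;
	return 0;
}

This matches the placement of the two hunks below: io_send_zc() opts in only on its non-fixed-buffer branch, since registered (fixed) buffers are pinned and accounted once at registration time rather than per request, while io_sendmsg_zc(), whose buffers are always plain user memory, opts in unconditionally.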
Diffstat (limited to 'io_uring/net.c')
-rw-r--r--    io_uring/net.c    3
1 file changed, 3 insertions(+), 0 deletions(-)
diff --git a/io_uring/net.c b/io_uring/net.c
index 9e3da845f906..966019fcbe8c 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -1097,6 +1097,7 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
 			return ret;
 		msg.sg_from_iter = io_sg_from_iter;
 	} else {
+		io_notif_set_extended(zc->notif);
 		ret = import_single_range(WRITE, zc->buf, zc->len, &iov,
 					  &msg.msg_iter);
 		if (unlikely(ret))
@@ -1158,6 +1159,8 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
 	unsigned flags;
 	int ret, min_ret = 0;
 
+	io_notif_set_extended(sr->notif);
+
 	sock = sock_from_file(req->file);
 	if (unlikely(!sock))
 		return -ENOTSOCK;
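For context on the "user-backed I/O" path the message refers to, here is a hedged userspace-side sketch using liburing's io_uring_prep_send_zc() helper; ring setup, sockfd, buf, and len are assumed to exist, and error handling is trimmed. A zerocopy send posts two CQEs: the send result, flagged IORING_CQE_F_MORE, and later a notification, flagged IORING_CQE_F_NOTIF, once the kernel drops its references to the pages, which with this patch is also where the mm unaccounting now happens.

#include <liburing.h>

/* Issue one zerocopy send from a plain user buffer and reap both
 * CQEs: the send result first, then the buffer-release notification. */
static int send_zc_once(struct io_uring *ring, int sockfd,
			const void *buf, size_t len)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int ret;

	if (!sqe)
		return -1;
	io_uring_prep_send_zc(sqe, sockfd, buf, len, 0, 0);
	io_uring_submit(ring);

	/* First CQE: the send result. IORING_CQE_F_MORE means a
	 * notification will follow and the buffer must stay intact. */
	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret < 0)
		return ret;
	ret = cqe->res;
	io_uring_cqe_seen(ring, cqe);

	/* Second CQE: the notification; after this the kernel has
	 * released (and, on this path, unaccounted) the pinned pages. */
	if (io_uring_wait_cqe(ring, &cqe) == 0)
		io_uring_cqe_seen(ring, cqe);
	return ret;
}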