author	Pavel Begunkov <asml.silence@gmail.com>	2022-09-26 14:35:09 +0100
committer	Jens Axboe <axboe@kernel.dk>	2022-09-26 08:36:50 -0600
commit	4c17a496a7a0730fdfc9e249b83cc58249111532 (patch)
tree	b1b8ef69df833c6265c4cafdaf43d29b59cd7724 /io_uring
parent	aa1df3a360a0c50e0f0086a785d75c2785c29967 (diff)
io_uring/net: fix cleanup double free free_iov init
Having ->async_data doesn't mean it's initialised, and previously we were relying on setting F_CLEANUP at the right moment. With zc sendmsg, though, we set F_CLEANUP early in prep when we allocate a notif, so we may allocate async_data, fail in copy_msg_hdr() and leave struct io_async_msghdr not correctly initialised but with F_CLEANUP set, which causes a ->free_iov double free and probably other nastiness.

Always initialise ->free_iov. Also, it might now point to fast_iov when the header copy fails, so avoid freeing it during cleanups.

Reported-by: syzbot+edfd15cd4246a3fc615a@syzkaller.appspotmail.com
Fixes: 493108d95f146 ("io_uring/net: zerocopy sendmsg")
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
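For illustration, here is a minimal userspace model of the fixed paths. This is not kernel code: struct msghdr_model, alloc_async() and cleanup() are made-up stand-ins for struct io_async_msghdr, io_msg_alloc_async() and io_send_zc_cleanup(). It shows the two pieces the patch combines: clear the owned pointer as soon as the header is handed out, and skip freeing it in cleanup when it aliases the inline storage.

/*
 * Userspace sketch only; names are stand-ins, not the io_uring API.
 */
#include <stdio.h>
#include <stdlib.h>

struct msghdr_model {
	char fast_iov[32];	/* inline storage, must never be freed */
	char *free_iov;		/* heap iovec, owned only when != fast_iov */
};

/* Allocation path after the fix: always start from a known state. */
static struct msghdr_model *alloc_async(void)
{
	struct msghdr_model *hdr = malloc(sizeof(*hdr));

	if (hdr)
		hdr->free_iov = NULL;	/* previously left uninitialised */
	return hdr;
}

/* Cleanup path after the fix: tolerate free_iov aliasing fast_iov. */
static void cleanup(struct msghdr_model *hdr)
{
	if (hdr->free_iov != hdr->fast_iov)
		free(hdr->free_iov);	/* free(NULL) is a no-op */
	free(hdr);
}

int main(void)
{
	/* Simulate prep failing right after allocation: the header copy
	 * left free_iov pointing at fast_iov, yet cleanup still runs. */
	struct msghdr_model *hdr = alloc_async();

	if (!hdr)
		return 1;
	hdr->free_iov = hdr->fast_iov;
	cleanup(hdr);		/* safe: inline storage is not freed */
	puts("cleanup ok");
	return 0;
}

Without both pieces, a prep failure after allocation would leave free_iov either uninitialised (freeing a garbage pointer) or pointing at fast_iov (freeing inline storage), which is the double-free/corruption syzbot hit.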
Diffstat (limited to 'io_uring')
-rw-r--r--	io_uring/net.c	20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/io_uring/net.c b/io_uring/net.c
index 2af56661590a..6b69eff6887e 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -124,20 +124,22 @@ static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
 {
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_cache_entry *entry;
+	struct io_async_msghdr *hdr;
 
 	if (!(issue_flags & IO_URING_F_UNLOCKED) &&
 	    (entry = io_alloc_cache_get(&ctx->netmsg_cache)) != NULL) {
-		struct io_async_msghdr *hdr;
-
 		hdr = container_of(entry, struct io_async_msghdr, cache);
+		hdr->free_iov = NULL;
 		req->flags |= REQ_F_ASYNC_DATA;
 		req->async_data = hdr;
 		return hdr;
 	}
 
-	if (!io_alloc_async_data(req))
-		return req->async_data;
-
+	if (!io_alloc_async_data(req)) {
+		hdr = req->async_data;
+		hdr->free_iov = NULL;
+		return hdr;
+	}
 	return NULL;
 }
 
@@ -192,7 +194,6 @@ int io_send_prep_async(struct io_kiocb *req)
 	io = io_msg_alloc_async_prep(req);
 	if (!io)
 		return -ENOMEM;
-	io->free_iov = NULL;
 	ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
 	return ret;
 }
@@ -209,7 +210,6 @@ static int io_setup_async_addr(struct io_kiocb *req,
 	io = io_msg_alloc_async(req, issue_flags);
 	if (!io)
 		return -ENOMEM;
-	io->free_iov = NULL;
 	memcpy(&io->addr, addr_storage, sizeof(io->addr));
 	return -EAGAIN;
 }
@@ -479,7 +479,6 @@ static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
 
 		if (msg.msg_iovlen == 0) {
 			sr->len = 0;
-			iomsg->free_iov = NULL;
 		} else if (msg.msg_iovlen > 1) {
 			return -EINVAL;
 		} else {
@@ -490,7 +489,6 @@ static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
 			if (clen < 0)
 				return -EINVAL;
 			sr->len = clen;
-			iomsg->free_iov = NULL;
 		}
 
 		if (req->flags & REQ_F_APOLL_MULTISHOT) {
@@ -913,7 +911,9 @@ void io_send_zc_cleanup(struct io_kiocb *req)
 
 	if (req_has_async_data(req)) {
 		io = req->async_data;
-		kfree(io->free_iov);
+		/* might be ->fast_iov if *msg_copy_hdr failed */
+		if (io->free_iov != io->fast_iov)
+			kfree(io->free_iov);
 	}
 	if (zc->notif) {
 		zc->notif->flags |= REQ_F_CQE_SKIP;