Diffstat (limited to 'io_uring/net.c')
-rw-r--r--   io_uring/net.c   924
1 file changed, 500 insertions(+), 424 deletions(-)
diff --git a/io_uring/net.c b/io_uring/net.c
index df1f7dc6f1c8..519ea055b761 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -10,14 +10,15 @@
#include <uapi/linux/io_uring.h>
+#include "filetable.h"
#include "io_uring.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "net.h"
#include "notif.h"
#include "rsrc.h"
+#include "zcrx.h"
-#if defined(CONFIG_NET)
struct io_shutdown {
struct file *file;
int how;
@@ -75,19 +76,50 @@ struct io_sr_msg {
u16 flags;
/* initialised and used only by !msg send variants */
u16 buf_group;
- u16 buf_index;
+ /* per-invocation mshot limit */
+ unsigned mshot_len;
+ /* overall mshot byte limit */
+ unsigned mshot_total_len;
void __user *msg_control;
/* used only for send zerocopy */
struct io_kiocb *notif;
};
/*
+ * The UAPI flags are the lower 8 bits, as that's all sqe->ioprio will hold
+ * anyway. Use the upper 8 bits for internal uses.
+ */
+enum sr_retry_flags {
+ IORING_RECV_RETRY = (1U << 15),
+ IORING_RECV_PARTIAL_MAP = (1U << 14),
+ IORING_RECV_MSHOT_CAP = (1U << 13),
+ IORING_RECV_MSHOT_LIM = (1U << 12),
+ IORING_RECV_MSHOT_DONE = (1U << 11),
+
+ IORING_RECV_RETRY_CLEAR = IORING_RECV_RETRY | IORING_RECV_PARTIAL_MAP,
+ IORING_RECV_NO_RETRY = IORING_RECV_RETRY | IORING_RECV_PARTIAL_MAP |
+ IORING_RECV_MSHOT_CAP | IORING_RECV_MSHOT_DONE,
+};
+
+/*
* Number of times we'll try and do receives if there's more data. If we
* exceed this limit, then add us to the back of the queue and retry from
* there. This helps fairness between flooding clients.
*/
#define MULTISHOT_MAX_RETRY 32
+struct io_recvzc {
+ struct file *file;
+ u16 flags;
+ u32 len;
+ struct io_zcrx_ifq *ifq;
+};
+
+static int io_sg_from_iter_iovec(struct sk_buff *skb,
+ struct iov_iter *from, size_t length);
+static int io_sg_from_iter(struct sk_buff *skb,
+ struct iov_iter *from, size_t length);
+
int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
@@ -115,7 +147,7 @@ int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
ret = __sys_shutdown_sock(sock, shutdown->how);
io_req_set_res(req, ret, 0);
- return IOU_OK;
+ return IOU_COMPLETE;
}
static bool io_net_retry(struct socket *sock, int flags)
@@ -127,17 +159,13 @@ static bool io_net_retry(struct socket *sock, int flags)
static void io_netmsg_iovec_free(struct io_async_msghdr *kmsg)
{
- if (kmsg->free_iov) {
- kfree(kmsg->free_iov);
- kmsg->free_iov_nr = 0;
- kmsg->free_iov = NULL;
- }
+ if (kmsg->vec.iovec)
+ io_vec_free(&kmsg->vec);
}
static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_async_msghdr *hdr = req->async_data;
- struct iovec *iov;
/* can't recycle, ensure we free the iovec if we have one */
if (unlikely(issue_flags & IO_URING_F_UNLOCKED)) {
@@ -146,13 +174,12 @@ static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
}
/* Let normal cleanup path reap it if we fail adding to the cache */
- iov = hdr->free_iov;
- if (io_alloc_cache_put(&req->ctx->netmsg_cache, hdr)) {
- if (iov)
- kasan_mempool_poison_object(iov);
- req->async_data = NULL;
- req->flags &= ~REQ_F_ASYNC_DATA;
- }
+ io_alloc_cache_vec_kasan(&hdr->vec);
+ if (hdr->vec.nr > IO_VEC_CACHE_SOFT_CAP)
+ io_vec_free(&hdr->vec);
+
+ if (io_alloc_cache_put(&req->ctx->netmsg_cache, hdr))
+ io_req_async_data_clear(req, REQ_F_NEED_CLEANUP);
}
static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req)
@@ -160,39 +187,14 @@ static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req)
struct io_ring_ctx *ctx = req->ctx;
struct io_async_msghdr *hdr;
- hdr = io_alloc_cache_get(&ctx->netmsg_cache);
- if (hdr) {
- if (hdr->free_iov) {
- kasan_mempool_unpoison_object(hdr->free_iov,
- hdr->free_iov_nr * sizeof(struct iovec));
- req->flags |= REQ_F_NEED_CLEANUP;
- }
- req->flags |= REQ_F_ASYNC_DATA;
- req->async_data = hdr;
- return hdr;
- }
+ hdr = io_uring_alloc_async_data(&ctx->netmsg_cache, req);
+ if (!hdr)
+ return NULL;
- if (!io_alloc_async_data(req)) {
- hdr = req->async_data;
- hdr->free_iov_nr = 0;
- hdr->free_iov = NULL;
- return hdr;
- }
- return NULL;
-}
-
-/* assign new iovec to kmsg, if we need to */
-static int io_net_vec_assign(struct io_kiocb *req, struct io_async_msghdr *kmsg,
- struct iovec *iov)
-{
- if (iov) {
+ /* If the async data was cached, we might have an iov cached inside. */
+ if (hdr->vec.iovec)
req->flags |= REQ_F_NEED_CLEANUP;
- kmsg->free_iov_nr = kmsg->msg.msg_iter.nr_segs;
- if (kmsg->free_iov)
- kfree(kmsg->free_iov);
- kmsg->free_iov = iov;
- }
- return 0;
+ return hdr;
}
static inline void io_mshot_prep_retry(struct io_kiocb *req,
@@ -202,151 +204,139 @@ static inline void io_mshot_prep_retry(struct io_kiocb *req,
req->flags &= ~REQ_F_BL_EMPTY;
sr->done_io = 0;
- sr->len = 0; /* get from the provided buffer */
- req->buf_index = sr->buf_group;
+ sr->flags &= ~IORING_RECV_RETRY_CLEAR;
+ sr->len = sr->mshot_len;
}
-#ifdef CONFIG_COMPAT
-static int io_compat_msg_copy_hdr(struct io_kiocb *req,
- struct io_async_msghdr *iomsg,
- struct compat_msghdr *msg, int ddir)
+static int io_net_import_vec(struct io_kiocb *req, struct io_async_msghdr *iomsg,
+ const struct iovec __user *uiov, unsigned uvec_seg,
+ int ddir)
{
- struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
- struct compat_iovec __user *uiov;
struct iovec *iov;
int ret, nr_segs;
- if (iomsg->free_iov) {
- nr_segs = iomsg->free_iov_nr;
- iov = iomsg->free_iov;
+ if (iomsg->vec.iovec) {
+ nr_segs = iomsg->vec.nr;
+ iov = iomsg->vec.iovec;
} else {
- iov = &iomsg->fast_iov;
nr_segs = 1;
+ iov = &iomsg->fast_iov;
}
+ ret = __import_iovec(ddir, uiov, uvec_seg, nr_segs, &iov,
+ &iomsg->msg.msg_iter, io_is_compat(req->ctx));
+ if (unlikely(ret < 0))
+ return ret;
+
+ if (iov) {
+ req->flags |= REQ_F_NEED_CLEANUP;
+ io_vec_reset_iovec(&iomsg->vec, iov, iomsg->msg.msg_iter.nr_segs);
+ }
+ return 0;
+}
+
+static int io_compat_msg_copy_hdr(struct io_kiocb *req,
+ struct io_async_msghdr *iomsg,
+ struct compat_msghdr *msg, int ddir,
+ struct sockaddr __user **save_addr)
+{
+ struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
+ struct compat_iovec __user *uiov;
+ int ret;
+
if (copy_from_user(msg, sr->umsg_compat, sizeof(*msg)))
return -EFAULT;
+ ret = __get_compat_msghdr(&iomsg->msg, msg, save_addr);
+ if (ret)
+ return ret;
+
uiov = compat_ptr(msg->msg_iov);
if (req->flags & REQ_F_BUFFER_SELECT) {
- compat_ssize_t clen;
-
if (msg->msg_iovlen == 0) {
- sr->len = iov->iov_len = 0;
- iov->iov_base = NULL;
+ sr->len = 0;
} else if (msg->msg_iovlen > 1) {
return -EINVAL;
} else {
- if (!access_ok(uiov, sizeof(*uiov)))
- return -EFAULT;
- if (__get_user(clen, &uiov->iov_len))
+ struct compat_iovec tmp_iov;
+
+ if (copy_from_user(&tmp_iov, uiov, sizeof(tmp_iov)))
return -EFAULT;
- if (clen < 0)
- return -EINVAL;
- sr->len = clen;
+ sr->len = tmp_iov.iov_len;
}
-
- return 0;
}
-
- ret = __import_iovec(ddir, (struct iovec __user *)uiov, msg->msg_iovlen,
- nr_segs, &iov, &iomsg->msg.msg_iter, true);
- if (unlikely(ret < 0))
- return ret;
-
- return io_net_vec_assign(req, iomsg, iov);
+ return 0;
}
-#endif
-static int io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg,
- struct user_msghdr *msg, int ddir)
+static int io_copy_msghdr_from_user(struct user_msghdr *msg,
+ struct user_msghdr __user *umsg)
{
- struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
- struct user_msghdr __user *umsg = sr->umsg;
- struct iovec *iov;
- int ret, nr_segs;
-
- if (iomsg->free_iov) {
- nr_segs = iomsg->free_iov_nr;
- iov = iomsg->free_iov;
- } else {
- iov = &iomsg->fast_iov;
- nr_segs = 1;
- }
-
if (!user_access_begin(umsg, sizeof(*umsg)))
return -EFAULT;
-
- ret = -EFAULT;
unsafe_get_user(msg->msg_name, &umsg->msg_name, ua_end);
unsafe_get_user(msg->msg_namelen, &umsg->msg_namelen, ua_end);
unsafe_get_user(msg->msg_iov, &umsg->msg_iov, ua_end);
unsafe_get_user(msg->msg_iovlen, &umsg->msg_iovlen, ua_end);
unsafe_get_user(msg->msg_control, &umsg->msg_control, ua_end);
unsafe_get_user(msg->msg_controllen, &umsg->msg_controllen, ua_end);
- msg->msg_flags = 0;
-
- if (req->flags & REQ_F_BUFFER_SELECT) {
- if (msg->msg_iovlen == 0) {
- sr->len = iov->iov_len = 0;
- iov->iov_base = NULL;
- } else if (msg->msg_iovlen > 1) {
- ret = -EINVAL;
- goto ua_end;
- } else {
- /* we only need the length for provided buffers */
- if (!access_ok(&msg->msg_iov[0].iov_len, sizeof(__kernel_size_t)))
- goto ua_end;
- unsafe_get_user(iov->iov_len, &msg->msg_iov[0].iov_len,
- ua_end);
- sr->len = iov->iov_len;
- }
- ret = 0;
+ user_access_end();
+ return 0;
ua_end:
- user_access_end();
- return ret;
- }
-
user_access_end();
- ret = __import_iovec(ddir, msg->msg_iov, msg->msg_iovlen, nr_segs,
- &iov, &iomsg->msg.msg_iter, false);
- if (unlikely(ret < 0))
- return ret;
-
- return io_net_vec_assign(req, iomsg, iov);
+ return -EFAULT;
}
-static int io_sendmsg_copy_hdr(struct io_kiocb *req,
- struct io_async_msghdr *iomsg)
+static int io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg,
+ struct user_msghdr *msg, int ddir,
+ struct sockaddr __user **save_addr)
{
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
- struct user_msghdr msg;
+ struct user_msghdr __user *umsg = sr->umsg;
int ret;
iomsg->msg.msg_name = &iomsg->addr;
iomsg->msg.msg_iter.nr_segs = 0;
-#ifdef CONFIG_COMPAT
- if (unlikely(req->ctx->compat)) {
+ if (io_is_compat(req->ctx)) {
struct compat_msghdr cmsg;
- ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ITER_SOURCE);
- if (unlikely(ret))
+ ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ddir, save_addr);
+ if (ret)
return ret;
- return __get_compat_msghdr(&iomsg->msg, &cmsg, NULL);
+ memset(msg, 0, sizeof(*msg));
+ msg->msg_namelen = cmsg.msg_namelen;
+ msg->msg_controllen = cmsg.msg_controllen;
+ msg->msg_iov = compat_ptr(cmsg.msg_iov);
+ msg->msg_iovlen = cmsg.msg_iovlen;
+ return 0;
}
-#endif
- ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_SOURCE);
+ ret = io_copy_msghdr_from_user(msg, umsg);
if (unlikely(ret))
return ret;
- ret = __copy_msghdr(&iomsg->msg, &msg, NULL);
+ msg->msg_flags = 0;
- /* save msg_control as sys_sendmsg() overwrites it */
- sr->msg_control = iomsg->msg.msg_control_user;
- return ret;
+ ret = __copy_msghdr(&iomsg->msg, msg, save_addr);
+ if (ret)
+ return ret;
+
+ if (req->flags & REQ_F_BUFFER_SELECT) {
+ if (msg->msg_iovlen == 0) {
+ sr->len = 0;
+ } else if (msg->msg_iovlen > 1) {
+ return -EINVAL;
+ } else {
+ struct iovec __user *uiov = msg->msg_iov;
+ struct iovec tmp_iov;
+
+ if (copy_from_user(&tmp_iov, uiov, sizeof(tmp_iov)))
+ return -EFAULT;
+ sr->len = tmp_iov.iov_len;
+ }
+ }
+ return 0;
}
void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
@@ -384,42 +374,50 @@ static int io_send_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe)
kmsg->msg.msg_name = &kmsg->addr;
kmsg->msg.msg_namelen = addr_len;
}
- if (!io_do_buffer_select(req)) {
- ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len,
- &kmsg->msg.msg_iter);
- if (unlikely(ret < 0))
- return ret;
+ if (sr->flags & IORING_RECVSEND_FIXED_BUF) {
+ req->flags |= REQ_F_IMPORT_BUFFER;
+ return 0;
}
- return 0;
+ if (req->flags & REQ_F_BUFFER_SELECT)
+ return 0;
+
+ if (sr->flags & IORING_SEND_VECTORIZED)
+ return io_net_import_vec(req, kmsg, sr->buf, sr->len, ITER_SOURCE);
+
+ return import_ubuf(ITER_SOURCE, sr->buf, sr->len, &kmsg->msg.msg_iter);
}
static int io_sendmsg_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
struct io_async_msghdr *kmsg = req->async_data;
+ struct user_msghdr msg;
int ret;
sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
+ ret = io_msg_copy_hdr(req, kmsg, &msg, ITER_SOURCE, NULL);
+ if (unlikely(ret))
+ return ret;
+ /* save msg_control as sys_sendmsg() overwrites it */
+ sr->msg_control = kmsg->msg.msg_control_user;
- ret = io_sendmsg_copy_hdr(req, kmsg);
- if (!ret)
- req->flags |= REQ_F_NEED_CLEANUP;
- return ret;
+ if (sr->flags & IORING_RECVSEND_FIXED_BUF) {
+ kmsg->msg.msg_iter.nr_segs = msg.msg_iovlen;
+ return io_prep_reg_iovec(req, &kmsg->vec, msg.msg_iov,
+ msg.msg_iovlen);
+ }
+ if (req->flags & REQ_F_BUFFER_SELECT)
+ return 0;
+ return io_net_import_vec(req, kmsg, msg.msg_iov, msg.msg_iovlen, ITER_SOURCE);
}
-#define SENDMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_BUNDLE)
+#define SENDMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_BUNDLE | IORING_SEND_VECTORIZED)
int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
sr->done_io = 0;
-
- if (req->opcode != IORING_OP_SEND) {
- if (sqe->addr2 || sqe->file_index)
- return -EINVAL;
- }
-
sr->len = READ_ONCE(sqe->len);
sr->flags = READ_ONCE(sqe->ioprio);
if (sr->flags & ~SENDMSG_FLAGS)
@@ -427,31 +425,30 @@ int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
if (sr->msg_flags & MSG_DONTWAIT)
req->flags |= REQ_F_NOWAIT;
+ if (req->flags & REQ_F_BUFFER_SELECT)
+ sr->buf_group = req->buf_index;
if (sr->flags & IORING_RECVSEND_BUNDLE) {
if (req->opcode == IORING_OP_SENDMSG)
return -EINVAL;
- if (!(req->flags & REQ_F_BUFFER_SELECT))
- return -EINVAL;
sr->msg_flags |= MSG_WAITALL;
- sr->buf_group = req->buf_index;
- req->buf_list = NULL;
+ req->flags |= REQ_F_MULTISHOT;
}
-#ifdef CONFIG_COMPAT
- if (req->ctx->compat)
+ if (io_is_compat(req->ctx))
sr->msg_flags |= MSG_CMSG_COMPAT;
-#endif
+
if (unlikely(!io_msg_alloc_async(req)))
return -ENOMEM;
if (req->opcode != IORING_OP_SENDMSG)
return io_send_setup(req, sqe);
+ if (unlikely(sqe->addr2 || sqe->file_index))
+ return -EINVAL;
return io_sendmsg_setup(req, sqe);
}
static void io_req_msg_cleanup(struct io_kiocb *req,
unsigned int issue_flags)
{
- req->flags &= ~REQ_F_NEED_CLEANUP;
io_netmsg_recycle(req, issue_flags);
}
@@ -474,7 +471,7 @@ static int io_bundle_nbufs(struct io_async_msghdr *kmsg, int ret)
if (iter_is_ubuf(&kmsg->msg.msg_iter))
return 1;
- iov = kmsg->free_iov;
+ iov = kmsg->vec.iovec;
if (!iov)
iov = &kmsg->fast_iov;
@@ -494,20 +491,29 @@ static int io_bundle_nbufs(struct io_async_msghdr *kmsg, int ret)
return nbufs;
}
-static inline bool io_send_finish(struct io_kiocb *req, int *ret,
+static int io_net_kbuf_recyle(struct io_kiocb *req, struct io_buffer_list *bl,
+ struct io_async_msghdr *kmsg, int len)
+{
+ req->flags |= REQ_F_BL_NO_RECYCLE;
+ if (req->flags & REQ_F_BUFFERS_COMMIT)
+ io_kbuf_commit(req, bl, len, io_bundle_nbufs(kmsg, len));
+ return IOU_RETRY;
+}
+
+static inline bool io_send_finish(struct io_kiocb *req,
struct io_async_msghdr *kmsg,
- unsigned issue_flags)
+ struct io_br_sel *sel)
{
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
- bool bundle_finished = *ret <= 0;
+ bool bundle_finished = sel->val <= 0;
unsigned int cflags;
if (!(sr->flags & IORING_RECVSEND_BUNDLE)) {
- cflags = io_put_kbuf(req, *ret, issue_flags);
+ cflags = io_put_kbuf(req, sel->val, sel->buf_list);
goto finish;
}
- cflags = io_put_kbufs(req, *ret, io_bundle_nbufs(kmsg, *ret), issue_flags);
+ cflags = io_put_kbufs(req, sel->val, sel->buf_list, io_bundle_nbufs(kmsg, sel->val));
if (bundle_finished || req->flags & REQ_F_BL_EMPTY)
goto finish;
@@ -516,15 +522,15 @@ static inline bool io_send_finish(struct io_kiocb *req, int *ret,
* Fill CQE for this receive and see if we should keep trying to
* receive from this socket.
*/
- if (io_req_post_cqe(req, *ret, cflags | IORING_CQE_F_MORE)) {
+ if (io_req_post_cqe(req, sel->val, cflags | IORING_CQE_F_MORE)) {
io_mshot_prep_retry(req, kmsg);
return false;
}
/* Otherwise stop bundle and use the current result. */
finish:
- io_req_set_res(req, *ret, cflags);
- *ret = IOU_OK;
+ io_req_set_res(req, sel->val, cflags);
+ sel->val = IOU_COMPLETE;
return true;
}
@@ -562,7 +568,6 @@ int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
kmsg->msg.msg_controllen = 0;
kmsg->msg.msg_control = NULL;
sr->done_io += ret;
- req->flags |= REQ_F_BL_NO_RECYCLE;
return -EAGAIN;
}
if (ret == -ERESTARTSYS)
@@ -575,13 +580,62 @@ int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
else if (sr->done_io)
ret = sr->done_io;
io_req_set_res(req, ret, 0);
- return IOU_OK;
+ return IOU_COMPLETE;
+}
+
+static int io_send_select_buffer(struct io_kiocb *req, unsigned int issue_flags,
+ struct io_br_sel *sel, struct io_async_msghdr *kmsg)
+{
+ struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
+ struct buf_sel_arg arg = {
+ .iovs = &kmsg->fast_iov,
+ .max_len = min_not_zero(sr->len, INT_MAX),
+ .nr_iovs = 1,
+ .buf_group = sr->buf_group,
+ };
+ int ret;
+
+ if (kmsg->vec.iovec) {
+ arg.nr_iovs = kmsg->vec.nr;
+ arg.iovs = kmsg->vec.iovec;
+ arg.mode = KBUF_MODE_FREE;
+ }
+
+ if (!(sr->flags & IORING_RECVSEND_BUNDLE))
+ arg.nr_iovs = 1;
+ else
+ arg.mode |= KBUF_MODE_EXPAND;
+
+ ret = io_buffers_select(req, &arg, sel, issue_flags);
+ if (unlikely(ret < 0))
+ return ret;
+
+ if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->vec.iovec) {
+ kmsg->vec.nr = ret;
+ kmsg->vec.iovec = arg.iovs;
+ req->flags |= REQ_F_NEED_CLEANUP;
+ }
+ sr->len = arg.out_len;
+
+ if (ret == 1) {
+ sr->buf = arg.iovs[0].iov_base;
+ ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len,
+ &kmsg->msg.msg_iter);
+ if (unlikely(ret))
+ return ret;
+ } else {
+ iov_iter_init(&kmsg->msg.msg_iter, ITER_SOURCE,
+ arg.iovs, ret, arg.out_len);
+ }
+
+ return 0;
}
int io_send(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
struct io_async_msghdr *kmsg = req->async_data;
+ struct io_br_sel sel = { };
struct socket *sock;
unsigned flags;
int min_ret = 0;
@@ -600,45 +654,11 @@ int io_send(struct io_kiocb *req, unsigned int issue_flags)
flags |= MSG_DONTWAIT;
retry_bundle:
+ sel.buf_list = NULL;
if (io_do_buffer_select(req)) {
- struct buf_sel_arg arg = {
- .iovs = &kmsg->fast_iov,
- .max_len = min_not_zero(sr->len, INT_MAX),
- .nr_iovs = 1,
- };
-
- if (kmsg->free_iov) {
- arg.nr_iovs = kmsg->free_iov_nr;
- arg.iovs = kmsg->free_iov;
- arg.mode = KBUF_MODE_FREE;
- }
-
- if (!(sr->flags & IORING_RECVSEND_BUNDLE))
- arg.nr_iovs = 1;
- else
- arg.mode |= KBUF_MODE_EXPAND;
-
- ret = io_buffers_select(req, &arg, issue_flags);
- if (unlikely(ret < 0))
+ ret = io_send_select_buffer(req, issue_flags, &sel, kmsg);
+ if (ret)
return ret;
-
- if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->free_iov) {
- kmsg->free_iov_nr = ret;
- kmsg->free_iov = arg.iovs;
- req->flags |= REQ_F_NEED_CLEANUP;
- }
- sr->len = arg.out_len;
-
- if (ret == 1) {
- sr->buf = arg.iovs[0].iov_base;
- ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len,
- &kmsg->msg.msg_iter);
- if (unlikely(ret))
- return ret;
- } else {
- iov_iter_init(&kmsg->msg.msg_iter, ITER_SOURCE,
- arg.iovs, ret, arg.out_len);
- }
}
/*
@@ -660,8 +680,7 @@ retry_bundle:
sr->len -= ret;
sr->buf += ret;
sr->done_io += ret;
- req->flags |= REQ_F_BL_NO_RECYCLE;
- return -EAGAIN;
+ return io_net_kbuf_recyle(req, sel.buf_list, kmsg, ret);
}
if (ret == -ERESTARTSYS)
ret = -EINTR;
@@ -672,11 +691,12 @@ retry_bundle:
else if (sr->done_io)
ret = sr->done_io;
- if (!io_send_finish(req, &ret, kmsg, issue_flags))
+ sel.val = ret;
+ if (!io_send_finish(req, kmsg, &sel))
goto retry_bundle;
io_req_msg_cleanup(req, issue_flags);
- return ret;
+ return sel.val;
}
static int io_recvmsg_mshot_prep(struct io_kiocb *req,
@@ -709,34 +729,16 @@ static int io_recvmsg_copy_hdr(struct io_kiocb *req,
struct user_msghdr msg;
int ret;
- iomsg->msg.msg_name = &iomsg->addr;
- iomsg->msg.msg_iter.nr_segs = 0;
-
-#ifdef CONFIG_COMPAT
- if (unlikely(req->ctx->compat)) {
- struct compat_msghdr cmsg;
-
- ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ITER_DEST);
- if (unlikely(ret))
- return ret;
+ ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_DEST, &iomsg->uaddr);
+ if (unlikely(ret))
+ return ret;
- ret = __get_compat_msghdr(&iomsg->msg, &cmsg, &iomsg->uaddr);
+ if (!(req->flags & REQ_F_BUFFER_SELECT)) {
+ ret = io_net_import_vec(req, iomsg, msg.msg_iov, msg.msg_iovlen,
+ ITER_DEST);
if (unlikely(ret))
return ret;
-
- return io_recvmsg_mshot_prep(req, iomsg, cmsg.msg_namelen,
- cmsg.msg_controllen);
}
-#endif
-
- ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_DEST);
- if (unlikely(ret))
- return ret;
-
- ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
- if (unlikely(ret))
- return ret;
-
return io_recvmsg_mshot_prep(req, iomsg, msg.msg_namelen,
msg.msg_controllen);
}
@@ -745,7 +747,6 @@ static int io_recvmsg_prep_setup(struct io_kiocb *req)
{
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
struct io_async_msghdr *kmsg;
- int ret;
kmsg = io_msg_alloc_async(req);
if (unlikely(!kmsg))
@@ -754,25 +755,20 @@ static int io_recvmsg_prep_setup(struct io_kiocb *req)
if (req->opcode == IORING_OP_RECV) {
kmsg->msg.msg_name = NULL;
kmsg->msg.msg_namelen = 0;
+ kmsg->msg.msg_inq = 0;
kmsg->msg.msg_control = NULL;
kmsg->msg.msg_get_inq = 1;
kmsg->msg.msg_controllen = 0;
kmsg->msg.msg_iocb = NULL;
kmsg->msg.msg_ubuf = NULL;
- if (!io_do_buffer_select(req)) {
- ret = import_ubuf(ITER_DEST, sr->buf, sr->len,
- &kmsg->msg.msg_iter);
- if (unlikely(ret))
- return ret;
- }
- return 0;
+ if (req->flags & REQ_F_BUFFER_SELECT)
+ return 0;
+ return import_ubuf(ITER_DEST, sr->buf, sr->len,
+ &kmsg->msg.msg_iter);
}
- ret = io_recvmsg_copy_hdr(req, kmsg);
- if (!ret)
- req->flags |= REQ_F_NEED_CLEANUP;
- return ret;
+ return io_recvmsg_copy_hdr(req, kmsg);
}
#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT | \
@@ -784,7 +780,7 @@ int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
sr->done_io = 0;
- if (unlikely(sqe->file_index || sqe->addr2))
+ if (unlikely(sqe->addr2))
return -EINVAL;
sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
@@ -797,49 +793,52 @@ int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
req->flags |= REQ_F_NOWAIT;
if (sr->msg_flags & MSG_ERRQUEUE)
req->flags |= REQ_F_CLEAR_POLLIN;
- if (req->flags & REQ_F_BUFFER_SELECT) {
- /*
- * Store the buffer group for this multishot receive separately,
- * as if we end up doing an io-wq based issue that selects a
- * buffer, it has to be committed immediately and that will
- * clear ->buf_list. This means we lose the link to the buffer
- * list, and the eventual buffer put on completion then cannot
- * restore it.
- */
+ if (req->flags & REQ_F_BUFFER_SELECT)
sr->buf_group = req->buf_index;
- req->buf_list = NULL;
- }
+ sr->mshot_total_len = sr->mshot_len = 0;
if (sr->flags & IORING_RECV_MULTISHOT) {
if (!(req->flags & REQ_F_BUFFER_SELECT))
return -EINVAL;
if (sr->msg_flags & MSG_WAITALL)
return -EINVAL;
- if (req->opcode == IORING_OP_RECV && sr->len)
+ if (req->opcode == IORING_OP_RECV) {
+ sr->mshot_len = sr->len;
+ sr->mshot_total_len = READ_ONCE(sqe->optlen);
+ if (sr->mshot_total_len)
+ sr->flags |= IORING_RECV_MSHOT_LIM;
+ } else if (sqe->optlen) {
return -EINVAL;
+ }
req->flags |= REQ_F_APOLL_MULTISHOT;
+ } else if (sqe->optlen) {
+ return -EINVAL;
}
+
if (sr->flags & IORING_RECVSEND_BUNDLE) {
if (req->opcode == IORING_OP_RECVMSG)
return -EINVAL;
}
-#ifdef CONFIG_COMPAT
- if (req->ctx->compat)
+ if (io_is_compat(req->ctx))
sr->msg_flags |= MSG_CMSG_COMPAT;
-#endif
+
sr->nr_multishot_loops = 0;
return io_recvmsg_prep_setup(req);
}
+/* bits to clear in old and inherit in new cflags on bundle retry */
+#define CQE_F_MASK (IORING_CQE_F_SOCK_NONEMPTY|IORING_CQE_F_MORE)
+
/*
* Finishes io_recv and io_recvmsg.
*
* Returns true if it is actually finished, or false if it should run
* again (for multishot).
*/
-static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
+static inline bool io_recv_finish(struct io_kiocb *req,
struct io_async_msghdr *kmsg,
- bool mshot_finished, unsigned issue_flags)
+ struct io_br_sel *sel, bool mshot_finished,
+ unsigned issue_flags)
{
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
unsigned int cflags = 0;
@@ -847,14 +846,45 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
if (kmsg->msg.msg_inq > 0)
cflags |= IORING_CQE_F_SOCK_NONEMPTY;
+ if (sel->val > 0 && sr->flags & IORING_RECV_MSHOT_LIM) {
+ /*
+ * If sr->len hits zero, the limit has been reached. Mark
+ * mshot as finished, and flag MSHOT_DONE as well to prevent
+ * a potential bundle from being retried.
+ */
+ sr->mshot_total_len -= min_t(int, sel->val, sr->mshot_total_len);
+ if (!sr->mshot_total_len) {
+ sr->flags |= IORING_RECV_MSHOT_DONE;
+ mshot_finished = true;
+ }
+ }
+
if (sr->flags & IORING_RECVSEND_BUNDLE) {
- cflags |= io_put_kbufs(req, *ret, io_bundle_nbufs(kmsg, *ret),
- issue_flags);
+ size_t this_ret = sel->val - sr->done_io;
+
+ cflags |= io_put_kbufs(req, this_ret, sel->buf_list, io_bundle_nbufs(kmsg, this_ret));
+ if (sr->flags & IORING_RECV_RETRY)
+ cflags = req->cqe.flags | (cflags & CQE_F_MASK);
+ if (sr->mshot_len && sel->val >= sr->mshot_len)
+ sr->flags |= IORING_RECV_MSHOT_CAP;
/* bundle with no more immediate buffers, we're done */
if (req->flags & REQ_F_BL_EMPTY)
goto finish;
+ /*
+ * If more is available AND it was a full transfer, retry and
+ * append to this one
+ */
+ if (!(sr->flags & IORING_RECV_NO_RETRY) &&
+ kmsg->msg.msg_inq > 1 && this_ret > 0 &&
+ !iov_iter_count(&kmsg->msg.msg_iter)) {
+ req->cqe.flags = cflags & ~CQE_F_MASK;
+ sr->len = kmsg->msg.msg_inq;
+ sr->done_io += this_ret;
+ sr->flags |= IORING_RECV_RETRY;
+ return false;
+ }
} else {
- cflags |= io_put_kbuf(req, *ret, issue_flags);
+ cflags |= io_put_kbuf(req, sel->val, sel->buf_list);
}
/*
@@ -862,33 +892,28 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
* receive from this socket.
*/
if ((req->flags & REQ_F_APOLL_MULTISHOT) && !mshot_finished &&
- io_req_post_cqe(req, *ret, cflags | IORING_CQE_F_MORE)) {
- int mshot_retry_ret = IOU_ISSUE_SKIP_COMPLETE;
-
+ io_req_post_cqe(req, sel->val, cflags | IORING_CQE_F_MORE)) {
+ sel->val = IOU_RETRY;
io_mshot_prep_retry(req, kmsg);
/* Known not-empty or unknown state, retry */
if (cflags & IORING_CQE_F_SOCK_NONEMPTY || kmsg->msg.msg_inq < 0) {
- if (sr->nr_multishot_loops++ < MULTISHOT_MAX_RETRY)
+ if (sr->nr_multishot_loops++ < MULTISHOT_MAX_RETRY &&
+ !(sr->flags & IORING_RECV_MSHOT_CAP)) {
return false;
+ }
/* mshot retries exceeded, force a requeue */
sr->nr_multishot_loops = 0;
- mshot_retry_ret = IOU_REQUEUE;
+ sr->flags &= ~IORING_RECV_MSHOT_CAP;
+ if (issue_flags & IO_URING_F_MULTISHOT)
+ sel->val = IOU_REQUEUE;
}
- if (issue_flags & IO_URING_F_MULTISHOT)
- *ret = mshot_retry_ret;
- else
- *ret = -EAGAIN;
return true;
}
/* Finish the request / stop multishot. */
finish:
- io_req_set_res(req, *ret, cflags);
-
- if (issue_flags & IO_URING_F_MULTISHOT)
- *ret = IOU_STOP_MULTISHOT;
- else
- *ret = IOU_OK;
+ io_req_set_res(req, sel->val, cflags);
+ sel->val = IOU_COMPLETE;
io_req_msg_cleanup(req, issue_flags);
return true;
}
@@ -981,6 +1006,7 @@ int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
struct io_async_msghdr *kmsg = req->async_data;
+ struct io_br_sel sel = { };
struct socket *sock;
unsigned flags;
int ret, min_ret = 0;
@@ -1000,23 +1026,23 @@ int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
flags |= MSG_DONTWAIT;
retry_multishot:
+ sel.buf_list = NULL;
if (io_do_buffer_select(req)) {
- void __user *buf;
size_t len = sr->len;
- buf = io_buffer_select(req, &len, issue_flags);
- if (!buf)
+ sel = io_buffer_select(req, &len, sr->buf_group, issue_flags);
+ if (!sel.addr)
return -ENOBUFS;
if (req->flags & REQ_F_APOLL_MULTISHOT) {
- ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
+ ret = io_recvmsg_prep_multishot(kmsg, sr, &sel.addr, &len);
if (ret) {
- io_kbuf_recycle(req, issue_flags);
+ io_kbuf_recycle(req, sel.buf_list, issue_flags);
return ret;
}
}
- iov_iter_ubuf(&kmsg->msg.msg_iter, ITER_DEST, buf, len);
+ iov_iter_ubuf(&kmsg->msg.msg_iter, ITER_DEST, sel.addr, len);
}
kmsg->msg.msg_get_inq = 1;
@@ -1035,16 +1061,12 @@ retry_multishot:
if (ret < min_ret) {
if (ret == -EAGAIN && force_nonblock) {
- if (issue_flags & IO_URING_F_MULTISHOT) {
- io_kbuf_recycle(req, issue_flags);
- return IOU_ISSUE_SKIP_COMPLETE;
- }
- return -EAGAIN;
+ io_kbuf_recycle(req, sel.buf_list, issue_flags);
+ return IOU_RETRY;
}
if (ret > 0 && io_net_retry(sock, flags)) {
sr->done_io += ret;
- req->flags |= REQ_F_BL_NO_RECYCLE;
- return -EAGAIN;
+ return io_net_kbuf_recyle(req, sel.buf_list, kmsg, ret);
}
if (ret == -ERESTARTSYS)
ret = -EINTR;
@@ -1058,16 +1080,17 @@ retry_multishot:
else if (sr->done_io)
ret = sr->done_io;
else
- io_kbuf_recycle(req, issue_flags);
+ io_kbuf_recycle(req, sel.buf_list, issue_flags);
- if (!io_recv_finish(req, &ret, kmsg, mshot_finished, issue_flags))
+ sel.val = ret;
+ if (!io_recv_finish(req, kmsg, &sel, mshot_finished, issue_flags))
goto retry_multishot;
- return ret;
+ return sel.val;
}
static int io_recv_buf_select(struct io_kiocb *req, struct io_async_msghdr *kmsg,
- size_t *len, unsigned int issue_flags)
+ struct io_br_sel *sel, unsigned int issue_flags)
{
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
int ret;
@@ -1083,21 +1106,35 @@ static int io_recv_buf_select(struct io_kiocb *req, struct io_async_msghdr *kmsg
.iovs = &kmsg->fast_iov,
.nr_iovs = 1,
.mode = KBUF_MODE_EXPAND,
+ .buf_group = sr->buf_group,
};
- if (kmsg->free_iov) {
- arg.nr_iovs = kmsg->free_iov_nr;
- arg.iovs = kmsg->free_iov;
+ if (kmsg->vec.iovec) {
+ arg.nr_iovs = kmsg->vec.nr;
+ arg.iovs = kmsg->vec.iovec;
arg.mode |= KBUF_MODE_FREE;
}
- if (kmsg->msg.msg_inq > 0)
- arg.max_len = min_not_zero(sr->len, kmsg->msg.msg_inq);
+ if (sel->val)
+ arg.max_len = sel->val;
+ else if (kmsg->msg.msg_inq > 1)
+ arg.max_len = min_not_zero(sel->val, (ssize_t) kmsg->msg.msg_inq);
- ret = io_buffers_peek(req, &arg);
+ /* if mshot limited, ensure we don't go over */
+ if (sr->flags & IORING_RECV_MSHOT_LIM)
+ arg.max_len = min_not_zero(arg.max_len, sr->mshot_total_len);
+ ret = io_buffers_peek(req, &arg, sel);
if (unlikely(ret < 0))
return ret;
+ if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->vec.iovec) {
+ kmsg->vec.nr = ret;
+ kmsg->vec.iovec = arg.iovs;
+ req->flags |= REQ_F_NEED_CLEANUP;
+ }
+ if (arg.partial_map)
+ sr->flags |= IORING_RECV_PARTIAL_MAP;
+
/* special case 1 vec, can be a fast path */
if (ret == 1) {
sr->buf = arg.iovs[0].iov_base;
@@ -1106,20 +1143,14 @@ static int io_recv_buf_select(struct io_kiocb *req, struct io_async_msghdr *kmsg
}
iov_iter_init(&kmsg->msg.msg_iter, ITER_DEST, arg.iovs, ret,
arg.out_len);
- if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->free_iov) {
- kmsg->free_iov_nr = ret;
- kmsg->free_iov = arg.iovs;
- req->flags |= REQ_F_NEED_CLEANUP;
- }
} else {
- void __user *buf;
+ size_t len = sel->val;
- *len = sr->len;
- buf = io_buffer_select(req, len, issue_flags);
- if (!buf)
+ *sel = io_buffer_select(req, &len, sr->buf_group, issue_flags);
+ if (!sel->addr)
return -ENOBUFS;
- sr->buf = buf;
- sr->len = *len;
+ sr->buf = sel->addr;
+ sr->len = len;
map_ubuf:
ret = import_ubuf(ITER_DEST, sr->buf, sr->len,
&kmsg->msg.msg_iter);
@@ -1134,11 +1165,11 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
struct io_async_msghdr *kmsg = req->async_data;
+ struct io_br_sel sel;
struct socket *sock;
unsigned flags;
int ret, min_ret = 0;
bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
- size_t len = sr->len;
bool mshot_finished;
if (!(req->flags & REQ_F_POLLED) &&
@@ -1154,9 +1185,11 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags)
flags |= MSG_DONTWAIT;
retry_multishot:
+ sel.buf_list = NULL;
if (io_do_buffer_select(req)) {
- ret = io_recv_buf_select(req, kmsg, &len, issue_flags);
- if (unlikely(ret)) {
+ sel.val = sr->len;
+ ret = io_recv_buf_select(req, kmsg, &sel, issue_flags);
+ if (unlikely(ret < 0)) {
kmsg->msg.msg_inq = -1;
goto out_free;
}
@@ -1172,19 +1205,14 @@ retry_multishot:
ret = sock_recvmsg(sock, &kmsg->msg, flags);
if (ret < min_ret) {
if (ret == -EAGAIN && force_nonblock) {
- if (issue_flags & IO_URING_F_MULTISHOT) {
- io_kbuf_recycle(req, issue_flags);
- return IOU_ISSUE_SKIP_COMPLETE;
- }
-
- return -EAGAIN;
+ io_kbuf_recycle(req, sel.buf_list, issue_flags);
+ return IOU_RETRY;
}
if (ret > 0 && io_net_retry(sock, flags)) {
sr->len -= ret;
sr->buf += ret;
sr->done_io += ret;
- req->flags |= REQ_F_BL_NO_RECYCLE;
- return -EAGAIN;
+ return io_net_kbuf_recyle(req, sel.buf_list, kmsg, ret);
}
if (ret == -ERESTARTSYS)
ret = -EINTR;
@@ -1200,12 +1228,76 @@ out_free:
else if (sr->done_io)
ret = sr->done_io;
else
- io_kbuf_recycle(req, issue_flags);
+ io_kbuf_recycle(req, sel.buf_list, issue_flags);
- if (!io_recv_finish(req, &ret, kmsg, mshot_finished, issue_flags))
+ sel.val = ret;
+ if (!io_recv_finish(req, kmsg, &sel, mshot_finished, issue_flags))
goto retry_multishot;
- return ret;
+ return sel.val;
+}
+
+int io_recvzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+ struct io_recvzc *zc = io_kiocb_to_cmd(req, struct io_recvzc);
+ unsigned ifq_idx;
+
+ if (unlikely(sqe->addr2 || sqe->addr || sqe->addr3))
+ return -EINVAL;
+
+ ifq_idx = READ_ONCE(sqe->zcrx_ifq_idx);
+ zc->ifq = xa_load(&req->ctx->zcrx_ctxs, ifq_idx);
+ if (!zc->ifq)
+ return -EINVAL;
+
+ zc->len = READ_ONCE(sqe->len);
+ zc->flags = READ_ONCE(sqe->ioprio);
+ if (READ_ONCE(sqe->msg_flags))
+ return -EINVAL;
+ if (zc->flags & ~(IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT))
+ return -EINVAL;
+ /* multishot required */
+ if (!(zc->flags & IORING_RECV_MULTISHOT))
+ return -EINVAL;
+ /* All data completions are posted as aux CQEs. */
+ req->flags |= REQ_F_APOLL_MULTISHOT;
+
+ return 0;
+}
+
+int io_recvzc(struct io_kiocb *req, unsigned int issue_flags)
+{
+ struct io_recvzc *zc = io_kiocb_to_cmd(req, struct io_recvzc);
+ struct socket *sock;
+ unsigned int len;
+ int ret;
+
+ if (!(req->flags & REQ_F_POLLED) &&
+ (zc->flags & IORING_RECVSEND_POLL_FIRST))
+ return -EAGAIN;
+
+ sock = sock_from_file(req->file);
+ if (unlikely(!sock))
+ return -ENOTSOCK;
+
+ len = zc->len;
+ ret = io_zcrx_recv(req, zc->ifq, sock, 0, issue_flags, &zc->len);
+ if (len && zc->len == 0) {
+ io_req_set_res(req, 0, 0);
+
+ return IOU_COMPLETE;
+ }
+ if (unlikely(ret <= 0) && ret != -EAGAIN) {
+ if (ret == -ERESTARTSYS)
+ ret = -EINTR;
+ if (ret == IOU_REQUEUE)
+ return IOU_REQUEUE;
+
+ req_set_fail(req);
+ io_req_set_res(req, ret, 0);
+ return IOU_COMPLETE;
+ }
+ return IOU_RETRY;
}
void io_send_zc_cleanup(struct io_kiocb *req)
@@ -1222,16 +1314,18 @@ void io_send_zc_cleanup(struct io_kiocb *req)
}
#define IO_ZC_FLAGS_COMMON (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_FIXED_BUF)
-#define IO_ZC_FLAGS_VALID (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE)
+#define IO_ZC_FLAGS_VALID (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE | \
+ IORING_SEND_VECTORIZED)
int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
struct io_ring_ctx *ctx = req->ctx;
+ struct io_async_msghdr *iomsg;
struct io_kiocb *notif;
+ int ret;
zc->done_io = 0;
- req->flags |= REQ_F_POLL_NO_LAZY;
if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
return -EINVAL;
@@ -1245,7 +1339,7 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
notif->cqe.user_data = req->cqe.user_data;
notif->cqe.res = 0;
notif->cqe.flags = IORING_CQE_F_NOTIF;
- req->flags |= REQ_F_NEED_CLEANUP;
+ req->flags |= REQ_F_NEED_CLEANUP | REQ_F_POLL_NO_LAZY;
zc->flags = READ_ONCE(sqe->ioprio);
if (unlikely(zc->flags & ~IO_ZC_FLAGS_COMMON)) {
@@ -1260,28 +1354,35 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
}
}
- if (req->opcode != IORING_OP_SEND_ZC) {
- if (unlikely(sqe->addr2 || sqe->file_index))
- return -EINVAL;
- if (unlikely(zc->flags & IORING_RECVSEND_FIXED_BUF))
- return -EINVAL;
- }
-
zc->len = READ_ONCE(sqe->len);
zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL | MSG_ZEROCOPY;
- zc->buf_index = READ_ONCE(sqe->buf_index);
+ req->buf_index = READ_ONCE(sqe->buf_index);
if (zc->msg_flags & MSG_DONTWAIT)
req->flags |= REQ_F_NOWAIT;
-#ifdef CONFIG_COMPAT
- if (req->ctx->compat)
+ if (io_is_compat(req->ctx))
zc->msg_flags |= MSG_CMSG_COMPAT;
-#endif
- if (unlikely(!io_msg_alloc_async(req)))
+
+ iomsg = io_msg_alloc_async(req);
+ if (unlikely(!iomsg))
return -ENOMEM;
- if (req->opcode != IORING_OP_SENDMSG_ZC)
- return io_send_setup(req, sqe);
- return io_sendmsg_setup(req, sqe);
+
+ if (req->opcode == IORING_OP_SEND_ZC) {
+ ret = io_send_setup(req, sqe);
+ } else {
+ if (unlikely(sqe->addr2 || sqe->file_index))
+ return -EINVAL;
+ ret = io_sendmsg_setup(req, sqe);
+ }
+ if (unlikely(ret))
+ return ret;
+
+ if (!(zc->flags & IORING_RECVSEND_FIXED_BUF)) {
+ iomsg->msg.sg_from_iter = io_sg_from_iter_iovec;
+ return io_notif_account_mem(zc->notif, iomsg->msg.msg_iter.count);
+ }
+ iomsg->msg.sg_from_iter = io_sg_from_iter;
+ return 0;
}
static int io_sg_from_iter_iovec(struct sk_buff *skb,
@@ -1338,41 +1439,13 @@ static int io_send_zc_import(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
struct io_async_msghdr *kmsg = req->async_data;
- int ret;
- if (sr->flags & IORING_RECVSEND_FIXED_BUF) {
- struct io_ring_ctx *ctx = req->ctx;
- struct io_rsrc_node *node;
-
- ret = -EFAULT;
- io_ring_submit_lock(ctx, issue_flags);
- node = io_rsrc_node_lookup(&ctx->buf_table, sr->buf_index);
- if (node) {
- io_req_assign_buf_node(sr->notif, node);
- ret = 0;
- }
- io_ring_submit_unlock(ctx, issue_flags);
+ WARN_ON_ONCE(!(sr->flags & IORING_RECVSEND_FIXED_BUF));
- if (unlikely(ret))
- return ret;
-
- ret = io_import_fixed(ITER_SOURCE, &kmsg->msg.msg_iter,
- node->buf, (u64)(uintptr_t)sr->buf,
- sr->len);
- if (unlikely(ret))
- return ret;
- kmsg->msg.sg_from_iter = io_sg_from_iter;
- } else {
- ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, &kmsg->msg.msg_iter);
- if (unlikely(ret))
- return ret;
- ret = io_notif_account_mem(sr->notif, sr->len);
- if (unlikely(ret))
- return ret;
- kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
- }
-
- return ret;
+ sr->notif->buf_index = req->buf_index;
+ return io_import_reg_buf(sr->notif, &kmsg->msg.msg_iter,
+ (u64)(uintptr_t)sr->buf, sr->len,
+ ITER_SOURCE, issue_flags);
}
int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
@@ -1393,7 +1466,8 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
(zc->flags & IORING_RECVSEND_POLL_FIRST))
return -EAGAIN;
- if (!zc->done_io) {
+ if (req->flags & REQ_F_IMPORT_BUFFER) {
+ req->flags &= ~REQ_F_IMPORT_BUFFER;
ret = io_send_zc_import(req, issue_flags);
if (unlikely(ret))
return ret;
@@ -1418,7 +1492,6 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
zc->len -= ret;
zc->buf += ret;
zc->done_io += ret;
- req->flags |= REQ_F_BL_NO_RECYCLE;
return -EAGAIN;
}
if (ret == -ERESTARTSYS)
@@ -1437,10 +1510,11 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
*/
if (!(issue_flags & IO_URING_F_UNLOCKED)) {
io_notif_flush(zc->notif);
+ zc->notif = NULL;
io_req_msg_cleanup(req, 0);
}
io_req_set_res(req, ret, IORING_CQE_F_MORE);
- return IOU_OK;
+ return IOU_COMPLETE;
}
int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
@@ -1451,6 +1525,19 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
unsigned flags;
int ret, min_ret = 0;
+ if (req->flags & REQ_F_IMPORT_BUFFER) {
+ unsigned uvec_segs = kmsg->msg.msg_iter.nr_segs;
+ int ret;
+
+ sr->notif->buf_index = req->buf_index;
+ ret = io_import_reg_vec(ITER_SOURCE, &kmsg->msg.msg_iter,
+ sr->notif, &kmsg->vec, uvec_segs,
+ issue_flags);
+ if (unlikely(ret))
+ return ret;
+ req->flags &= ~REQ_F_IMPORT_BUFFER;
+ }
+
sock = sock_from_file(req->file);
if (unlikely(!sock))
return -ENOTSOCK;
@@ -1469,7 +1556,6 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
kmsg->msg.msg_control_user = sr->msg_control;
kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
- kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
if (unlikely(ret < min_ret)) {
@@ -1478,7 +1564,6 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
if (ret > 0 && io_net_retry(sock, flags)) {
sr->done_io += ret;
- req->flags |= REQ_F_BL_NO_RECYCLE;
return -EAGAIN;
}
if (ret == -ERESTARTSYS)
@@ -1497,10 +1582,11 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
*/
if (!(issue_flags & IO_URING_F_UNLOCKED)) {
io_notif_flush(sr->notif);
+ sr->notif = NULL;
io_req_msg_cleanup(req, 0);
}
io_req_set_res(req, ret, IORING_CQE_F_MORE);
- return IOU_OK;
+ return IOU_COMPLETE;
}
void io_sendrecv_fail(struct io_kiocb *req)
@@ -1583,19 +1669,11 @@ retry:
put_unused_fd(fd);
ret = PTR_ERR(file);
if (ret == -EAGAIN && force_nonblock &&
- !(accept->iou_flags & IORING_ACCEPT_DONTWAIT)) {
- /*
- * if it's multishot and polled, we don't need to
- * return EAGAIN to arm the poll infra since it
- * has already been done
- */
- if (issue_flags & IO_URING_F_MULTISHOT)
- return IOU_ISSUE_SKIP_COMPLETE;
- return ret;
- }
+ !(accept->iou_flags & IORING_ACCEPT_DONTWAIT))
+ return IOU_RETRY;
+
if (ret == -ERESTARTSYS)
ret = -EINTR;
- req_set_fail(req);
} else if (!fixed) {
fd_install(fd, file);
ret = fd;
@@ -1608,23 +1686,17 @@ retry:
if (!arg.is_empty)
cflags |= IORING_CQE_F_SOCK_NONEMPTY;
- if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
- io_req_set_res(req, ret, cflags);
- return IOU_OK;
- }
-
- if (ret < 0)
- return ret;
- if (io_req_post_cqe(req, ret, cflags | IORING_CQE_F_MORE)) {
+ if (ret >= 0 && (req->flags & REQ_F_APOLL_MULTISHOT) &&
+ io_req_post_cqe(req, ret, cflags | IORING_CQE_F_MORE)) {
if (cflags & IORING_CQE_F_SOCK_NONEMPTY || arg.is_empty == -1)
goto retry;
- if (issue_flags & IO_URING_F_MULTISHOT)
- return IOU_ISSUE_SKIP_COMPLETE;
- return -EAGAIN;
+ return IOU_RETRY;
}
io_req_set_res(req, ret, cflags);
- return IOU_STOP_MULTISHOT;
+ if (ret < 0)
+ req_set_fail(req);
+ return IOU_COMPLETE;
}
int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
@@ -1678,7 +1750,7 @@ int io_socket(struct io_kiocb *req, unsigned int issue_flags)
sock->file_slot);
}
io_req_set_res(req, ret, 0);
- return IOU_OK;
+ return IOU_COMPLETE;
}
int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
@@ -1708,6 +1780,13 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags)
int ret;
bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
+ if (connect->in_progress) {
+ struct poll_table_struct pt = { ._key = EPOLLERR };
+
+ if (vfs_poll(req->file, &pt) & EPOLLERR)
+ goto get_sock_err;
+ }
+
file_flags = force_nonblock ? O_NONBLOCK : 0;
ret = __sys_connect_file(req->file, &io->addr, connect->addr_len,
@@ -1730,8 +1809,10 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags)
* which means the previous result is good. For both of these,
* grab the sock_error() and use that for the completion.
*/
- if (ret == -EBADFD || ret == -EISCONN)
+ if (ret == -EBADFD || ret == -EISCONN) {
+get_sock_err:
ret = sock_error(sock_from_file(req->file)->sk);
+ }
}
if (ret == -ERESTARTSYS)
ret = -EINTR;
@@ -1740,7 +1821,7 @@ out:
req_set_fail(req);
io_req_msg_cleanup(req, issue_flags);
io_req_set_res(req, ret, 0);
- return IOU_OK;
+ return IOU_COMPLETE;
}
int io_bind_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
@@ -1811,11 +1892,6 @@ void io_netmsg_cache_free(const void *entry)
{
struct io_async_msghdr *kmsg = (struct io_async_msghdr *) entry;
- if (kmsg->free_iov) {
- kasan_mempool_unpoison_object(kmsg->free_iov,
- kmsg->free_iov_nr * sizeof(struct iovec));
- io_netmsg_iovec_free(kmsg);
- }
+ io_vec_free(&kmsg->vec);
kfree(kmsg);
}
-#endif
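
For readers following the multishot receive changes above: a minimal userspace sketch of how the new overall byte budget (sr->mshot_total_len, read from sqe->optlen in this diff) might be driven from liburing. This is not part of the commit; the queue_capped_multishot_recv() helper, the buffer group id, and the assumption that a provided-buffer ring is already registered are all illustrative.

#include <errno.h>
#include <liburing.h>

/*
 * Sketch only: arm a multishot recv whose per-invocation length cap is
 * `len` (sr->mshot_len in the diff; 0 means no per-shot cap) and whose
 * overall byte budget is `total_cap` (sr->mshot_total_len). Assumes `ring`
 * is initialised and a provided-buffer ring is registered as group 0 for
 * `sockfd`.
 */
static int queue_capped_multishot_recv(struct io_uring *ring, int sockfd,
					unsigned len, unsigned total_cap)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -EBUSY;

	io_uring_prep_recv_multishot(sqe, sockfd, NULL, len, 0);
	sqe->flags |= IOSQE_BUFFER_SELECT;
	sqe->buf_group = 0;
	/* overall multishot byte limit, carried in sqe->optlen per this diff */
	sqe->optlen = total_cap;

	return io_uring_submit(ring);
}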