Diffstat (limited to 'io_uring')
-rw-r--r--  io_uring/io-wq.c   |  8
-rw-r--r--  io_uring/memmap.c  |  2
-rw-r--r--  io_uring/net.c     | 36
3 files changed, 31 insertions(+), 15 deletions(-)
diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c
index be91edf34f01..17dfaa0395c4 100644
--- a/io_uring/io-wq.c
+++ b/io_uring/io-wq.c
@@ -357,6 +357,13 @@ static void create_worker_cb(struct callback_head *cb)
worker = container_of(cb, struct io_worker, create_work);
wq = worker->wq;
acct = worker->acct;
+
+ rcu_read_lock();
+ do_create = !io_acct_activate_free_worker(acct);
+ rcu_read_unlock();
+ if (!do_create)
+ goto no_need_create;
+
raw_spin_lock(&acct->workers_lock);
if (acct->nr_workers < acct->max_workers) {
@@ -367,6 +374,7 @@ static void create_worker_cb(struct callback_head *cb)
if (do_create) {
create_io_worker(wq, acct);
} else {
+no_need_create:
atomic_dec(&acct->nr_running);
io_worker_ref_put(wq);
}
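
The io-wq.c hunks above make create_worker_cb() first try to activate an already-free worker (under rcu_read_lock()) and only fall back to spawning a new one, branching to the no_need_create label to undo the pending-creation accounting when a spawn turns out to be unnecessary. Below is a rough user-space sketch of the same check-for-an-idle-worker-before-spawning pattern using plain pthreads; the pool structure and all function names are hypothetical and do not mirror io-wq internals (error and spurious-wakeup handling elided).

#include <pthread.h>
#include <stdbool.h>

struct pool {
	pthread_mutex_t lock;
	pthread_cond_t  idle_cv;
	int nr_idle;       /* workers parked in pthread_cond_wait() */
	int nr_workers;
	int max_workers;
};

static void *worker_fn(void *arg)
{
	struct pool *p = arg;

	/* Park as idle until signalled; a real worker would loop over a
	 * work list here. */
	pthread_mutex_lock(&p->lock);
	p->nr_idle++;
	pthread_cond_wait(&p->idle_cv, &p->lock);
	p->nr_idle--;
	pthread_mutex_unlock(&p->lock);
	return NULL;
}

/* True if pending work will be picked up without creating a thread. */
static bool pool_activate_idle(struct pool *p)
{
	bool woken = false;

	pthread_mutex_lock(&p->lock);
	if (p->nr_idle > 0) {
		pthread_cond_signal(&p->idle_cv);
		woken = true;
	}
	pthread_mutex_unlock(&p->lock);
	return woken;
}

static void pool_ensure_worker(struct pool *p)
{
	pthread_t tid;

	if (pool_activate_idle(p))     /* cheap path: wake an idle worker */
		return;

	pthread_mutex_lock(&p->lock);  /* slow path: grow the pool if allowed */
	if (p->nr_workers < p->max_workers) {
		p->nr_workers++;
		pthread_mutex_unlock(&p->lock);
		pthread_create(&tid, NULL, worker_fn, p);
		return;
	}
	pthread_mutex_unlock(&p->lock);
}
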
diff --git a/io_uring/memmap.c b/io_uring/memmap.c
index 725dc0bec24c..2e99dffddfc5 100644
--- a/io_uring/memmap.c
+++ b/io_uring/memmap.c
@@ -156,7 +156,7 @@ static int io_region_allocate_pages(struct io_ring_ctx *ctx,
unsigned long mmap_offset)
{
gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN;
- unsigned long size = mr->nr_pages << PAGE_SHIFT;
+ size_t size = (size_t) mr->nr_pages << PAGE_SHIFT;
unsigned long nr_allocated;
struct page **pages;
void *p;
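
The memmap.c change avoids a shift done in 32-bit arithmetic: when mr->nr_pages is narrower than size_t, nr_pages << PAGE_SHIFT wraps before the result is widened into the destination, so large regions compute a bogus size. Casting to size_t first performs the shift at full width. A standalone illustration (assuming a 32-bit unsigned page count and 4 KiB pages; not kernel code):

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	unsigned int nr_pages = 1u << 22;   /* 4M pages = 16 GiB at 4 KiB/page */
	unsigned int page_shift = 12;

	/* Shift evaluated as unsigned int: the high bits wrap away (result 0). */
	size_t wrapped = nr_pages << page_shift;
	/* Cast first, as the patch does: the shift happens at size_t width. */
	size_t correct = (size_t)nr_pages << page_shift;

	printf("wrapped=%zu correct=%zu\n", wrapped, correct);
	return 0;
}
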
diff --git a/io_uring/net.c b/io_uring/net.c
index 35585bdc59f3..d69f2afa4f7a 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -382,6 +382,10 @@ static int io_send_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe)
}
if (req->flags & REQ_F_BUFFER_SELECT)
return 0;
+
+ if (sr->flags & IORING_SEND_VECTORIZED)
+ return io_net_import_vec(req, kmsg, sr->buf, sr->len, ITER_SOURCE);
+
return import_ubuf(ITER_SOURCE, sr->buf, sr->len, &kmsg->msg.msg_iter);
}
@@ -409,7 +413,7 @@ static int io_sendmsg_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe
return io_net_import_vec(req, kmsg, msg.msg_iov, msg.msg_iovlen, ITER_SOURCE);
}
-#define SENDMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_BUNDLE)
+#define SENDMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_BUNDLE | IORING_SEND_VECTORIZED)
int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
@@ -490,6 +494,15 @@ static int io_bundle_nbufs(struct io_async_msghdr *kmsg, int ret)
return nbufs;
}
+static int io_net_kbuf_recyle(struct io_kiocb *req,
+ struct io_async_msghdr *kmsg, int len)
+{
+ req->flags |= REQ_F_BL_NO_RECYCLE;
+ if (req->flags & REQ_F_BUFFERS_COMMIT)
+ io_kbuf_commit(req, req->buf_list, len, io_bundle_nbufs(kmsg, len));
+ return IOU_RETRY;
+}
+
static inline bool io_send_finish(struct io_kiocb *req, int *ret,
struct io_async_msghdr *kmsg,
unsigned issue_flags)
@@ -558,8 +571,7 @@ int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
kmsg->msg.msg_controllen = 0;
kmsg->msg.msg_control = NULL;
sr->done_io += ret;
- req->flags |= REQ_F_BL_NO_RECYCLE;
- return -EAGAIN;
+ return io_net_kbuf_recyle(req, kmsg, ret);
}
if (ret == -ERESTARTSYS)
ret = -EINTR;
@@ -670,8 +682,7 @@ retry_bundle:
sr->len -= ret;
sr->buf += ret;
sr->done_io += ret;
- req->flags |= REQ_F_BL_NO_RECYCLE;
- return -EAGAIN;
+ return io_net_kbuf_recyle(req, kmsg, ret);
}
if (ret == -ERESTARTSYS)
ret = -EINTR;
@@ -1067,8 +1078,7 @@ retry_multishot:
}
if (ret > 0 && io_net_retry(sock, flags)) {
sr->done_io += ret;
- req->flags |= REQ_F_BL_NO_RECYCLE;
- return IOU_RETRY;
+ return io_net_kbuf_recyle(req, kmsg, ret);
}
if (ret == -ERESTARTSYS)
ret = -EINTR;
@@ -1214,8 +1224,7 @@ retry_multishot:
sr->len -= ret;
sr->buf += ret;
sr->done_io += ret;
- req->flags |= REQ_F_BL_NO_RECYCLE;
- return -EAGAIN;
+ return io_net_kbuf_recyle(req, kmsg, ret);
}
if (ret == -ERESTARTSYS)
ret = -EINTR;
@@ -1318,7 +1327,8 @@ void io_send_zc_cleanup(struct io_kiocb *req)
}
#define IO_ZC_FLAGS_COMMON (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_FIXED_BUF)
-#define IO_ZC_FLAGS_VALID (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE)
+#define IO_ZC_FLAGS_VALID (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE | \
+ IORING_SEND_VECTORIZED)
int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
@@ -1495,8 +1505,7 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
zc->len -= ret;
zc->buf += ret;
zc->done_io += ret;
- req->flags |= REQ_F_BL_NO_RECYCLE;
- return -EAGAIN;
+ return io_net_kbuf_recyle(req, kmsg, ret);
}
if (ret == -ERESTARTSYS)
ret = -EINTR;
@@ -1566,8 +1575,7 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
if (ret > 0 && io_net_retry(sock, flags)) {
sr->done_io += ret;
- req->flags |= REQ_F_BL_NO_RECYCLE;
- return -EAGAIN;
+ return io_net_kbuf_recyle(req, kmsg, ret);
}
if (ret == -ERESTARTSYS)
ret = -EINTR;
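
The net.c changes introduce IORING_SEND_VECTORIZED for plain send and send-zerocopy requests: with the flag set, sqe->addr and sqe->len are treated as an iovec array and its element count, imported through io_net_import_vec() rather than import_ubuf(). They also route the short-transfer retry paths through the new io_net_kbuf_recyle() helper, so partially consumed provided buffers are committed before the request is retried. A rough liburing-side sketch of submitting a vectorized send follows; placing the flag in sqe->ioprio is inferred from the kernel reading send flags from that field, and a recent kernel plus headers defining IORING_SEND_VECTORIZED are assumed, so treat this as a sketch rather than a documented API.

#include <liburing.h>
#include <sys/socket.h>
#include <sys/uio.h>

/* Queue one vectorized send: the iovec array and its element count go
 * where a flat buffer and its length normally would, and the
 * IORING_SEND_VECTORIZED flag marks them as such. Illustrative only. */
static int queue_send_vectorized(struct io_uring *ring, int sockfd,
				 const struct iovec *iov, unsigned nr_vecs)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -EBUSY;
	io_uring_prep_send(sqe, sockfd, iov, nr_vecs, MSG_NOSIGNAL);
	sqe->ioprio |= IORING_SEND_VECTORIZED;   /* send/recv flags live in ioprio */
	return io_uring_submit(ring);
}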