author    Pavel Begunkov <asml.silence@gmail.com>  2023-04-04 13:39:49 +0100
committer Jens Axboe <axboe@kernel.dk>             2023-04-04 09:30:39 -0600
commit    ef8ae64ffa9578c12e44de42604004c2cc3e9c27 (patch)
tree      a9aeabbb02273d7ca7ce7c38eebd96665b100156 /io_uring/rsrc.c
parent    03adabe81abb20221079b48343783b4327bd1186 (diff)
io_uring/rsrc: protect node refs with uring_lock
Currently, for nodes we have an atomic counter and some cached (non-atomic) refs protected by uring_lock. Let's put all ref manipulations under uring_lock and get rid of the atomic part. This is free: in all the cases we care about, we already hold the lock anyway.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/25b142feed7d831008257d90c8b17c0115d4fc15.1680576071.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
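[Editorial note] The idea can be sketched outside the kernel. The following is a minimal userspace illustration, not kernel code: the pthread mutex stands in for ctx->uring_lock, and node_get()/node_put() are made-up names for the roles the refill/put helpers play. Once every ref manipulation runs under the same lock, a plain int replaces both the atomic counter and the cached-refs batching:

#include <pthread.h>
#include <stdlib.h>

struct node {
        int refs;       /* plain int: only ever touched under the lock */
};

/* stands in for ctx->uring_lock; callers hold it around every get/put */
static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

/* must be called with big_lock held */
static void node_get(struct node *node)
{
        node->refs++;
}

/* must be called with big_lock held */
static void node_put(struct node *node)
{
        if (!--node->refs)
                free(node);     /* plays the role of io_rsrc_node_ref_zero() */
}

No atomics and no IO_RSRC_REF_BATCH-style caching are needed: the lock the callers already hold provides all the required exclusion, which is why the commit message calls the change free.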
Diffstat (limited to 'io_uring/rsrc.c')
-rw-r--r--  io_uring/rsrc.c | 30 ++++--------------------------
1 file changed, 4 insertions(+), 26 deletions(-)
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 1e7c960737fd..1237fc77c250 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -27,23 +27,10 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
                                   struct io_mapped_ubuf **pimu,
                                   struct page **last_hpage);
 
-#define IO_RSRC_REF_BATCH 100
-
 /* only define max */
 #define IORING_MAX_FIXED_FILES  (1U << 20)
 #define IORING_MAX_REG_BUFFERS  (1U << 14)
 
-void io_rsrc_refs_drop(struct io_ring_ctx *ctx)
-        __must_hold(&ctx->uring_lock)
-{
-        struct io_rsrc_node *node = ctx->rsrc_node;
-
-        if (node && node->cached_refs) {
-                io_rsrc_put_node(node, node->cached_refs);
-                node->cached_refs = 0;
-        }
-}
-
 int __io_account_mem(struct user_struct *user, unsigned long nr_pages)
 {
         unsigned long page_limit, cur_pages, new_pages;
@@ -153,13 +140,6 @@ static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slo
         *slot = NULL;
 }
 
-void io_rsrc_refs_refill(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
-        __must_hold(&ctx->uring_lock)
-{
-        node->cached_refs += IO_RSRC_REF_BATCH;
-        refcount_add(IO_RSRC_REF_BATCH, &node->refs);
-}
-
 static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
 {
         struct io_rsrc_data *rsrc_data = ref_node->rsrc_data;
@@ -225,7 +205,8 @@ void io_rsrc_node_destroy(struct io_rsrc_node *ref_node)
         kfree(ref_node);
 }
 
-__cold void io_rsrc_node_ref_zero(struct io_rsrc_node *node)
+void io_rsrc_node_ref_zero(struct io_rsrc_node *node)
+        __must_hold(&node->rsrc_data->ctx->uring_lock)
 {
         struct io_ring_ctx *ctx = node->rsrc_data->ctx;
         unsigned long flags;
@@ -269,7 +250,7 @@ static struct io_rsrc_node *io_rsrc_node_alloc(void)
         if (!ref_node)
                 return NULL;
 
-        refcount_set(&ref_node->refs, 1);
+        ref_node->refs = 1;
         INIT_LIST_HEAD(&ref_node->node);
         INIT_LIST_HEAD(&ref_node->rsrc_list);
         ref_node->done = false;
@@ -283,8 +264,6 @@ void io_rsrc_node_switch(struct io_ring_ctx *ctx,
         WARN_ON_ONCE(!ctx->rsrc_backup_node);
         WARN_ON_ONCE(data_to_kill && !ctx->rsrc_node);
 
-        io_rsrc_refs_drop(ctx);
-
         if (data_to_kill) {
                 struct io_rsrc_node *rsrc_node = ctx->rsrc_node;
 
@@ -295,14 +274,13 @@ void io_rsrc_node_switch(struct io_ring_ctx *ctx,
 
                 atomic_inc(&data_to_kill->refs);
                 /* put master ref */
-                io_rsrc_put_node(rsrc_node, 1);
+                io_put_rsrc_node(rsrc_node);
                 ctx->rsrc_node = NULL;
         }
 
         if (!ctx->rsrc_node) {
                 ctx->rsrc_node = ctx->rsrc_backup_node;
                 ctx->rsrc_backup_node = NULL;
-                ctx->rsrc_node->cached_refs = 0;
         }
 }
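[Editorial note] io_put_rsrc_node(), which the last hunk switches to, is a helper in io_uring/rsrc.h and is not part of this file's diff. Under the new scheme it plausibly reduces to a plain, lock-protected decrement along these lines (a sketch under that assumption, not the verbatim header):

/* sketch of the counterpart helper in io_uring/rsrc.h (not in this diff) */
static inline void io_put_rsrc_node(struct io_rsrc_node *node)
{
        /* the caller holds ctx->uring_lock, so no atomics are needed */
        if (node && !--node->refs)
                io_rsrc_node_ref_zero(node);
}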