From 8e15c0e71b8ae64fb7163532860f8d608165281f Mon Sep 17 00:00:00 2001
From: Pavel Begunkov
Date: Tue, 4 Apr 2023 13:39:46 +0100
Subject: io_uring/rsrc: keep cached refs per node

We cache refs of the current node (i.e. ctx->rsrc_node) in
ctx->rsrc_cached_refs. We'll be moving away from atomics, so move the
cached refs into struct io_rsrc_node for now. It's a prep patch and
shouldn't change anything in practice.

Signed-off-by: Pavel Begunkov
Link: https://lore.kernel.org/r/9edc3669c1d71b06c2dca78b2b2b8bb9292738b9.1680576071.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe
---
 io_uring/rsrc.c | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

(limited to 'io_uring/rsrc.c')

diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index f2da9e251e3f..1e7c960737fd 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -36,9 +36,11 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
 void io_rsrc_refs_drop(struct io_ring_ctx *ctx)
 	__must_hold(&ctx->uring_lock)
 {
-	if (ctx->rsrc_cached_refs) {
-		io_rsrc_put_node(ctx->rsrc_node, ctx->rsrc_cached_refs);
-		ctx->rsrc_cached_refs = 0;
+	struct io_rsrc_node *node = ctx->rsrc_node;
+
+	if (node && node->cached_refs) {
+		io_rsrc_put_node(node, node->cached_refs);
+		node->cached_refs = 0;
 	}
 }
 
@@ -151,11 +153,11 @@ static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slo
 	*slot = NULL;
 }
 
-void io_rsrc_refs_refill(struct io_ring_ctx *ctx)
+void io_rsrc_refs_refill(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
 	__must_hold(&ctx->uring_lock)
 {
-	ctx->rsrc_cached_refs += IO_RSRC_REF_BATCH;
-	refcount_add(IO_RSRC_REF_BATCH, &ctx->rsrc_node->refs);
+	node->cached_refs += IO_RSRC_REF_BATCH;
+	refcount_add(IO_RSRC_REF_BATCH, &node->refs);
 }
 
 static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
@@ -300,6 +302,7 @@ void io_rsrc_node_switch(struct io_ring_ctx *ctx,
 	if (!ctx->rsrc_node) {
 		ctx->rsrc_node = ctx->rsrc_backup_node;
 		ctx->rsrc_backup_node = NULL;
+		ctx->rsrc_node->cached_refs = 0;
 	}
 }
--
cgit
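
Note: below is a minimal, self-contained userspace sketch of the batched ref-caching scheme the hunks above touch. It mirrors the names in the diff (cached_refs, IO_RSRC_REF_BATCH, io_rsrc_refs_refill(), io_rsrc_refs_drop()), but the struct layout, the atomic_int stand-in for the kernel's refcount_t, and the toy io_rsrc_put_node() are illustrative assumptions, not the kernel definitions; the real struct io_rsrc_node change lives outside this filtered view of the commit.

/*
 * Simplified userspace model of per-node ref caching. Names follow the
 * diff above; everything else is an illustrative stand-in, not kernel code.
 */
#include <stdatomic.h>
#include <stdio.h>

#define IO_RSRC_REF_BATCH 1024

struct io_rsrc_node {
	atomic_int refs;	/* stand-in for the kernel's refcount_t */
	int cached_refs;	/* refs pre-charged for the submission path */
};

struct io_ring_ctx {
	struct io_rsrc_node *rsrc_node;
};

/* Drop 'nr' references from the node's shared counter. */
static void io_rsrc_put_node(struct io_rsrc_node *node, int nr)
{
	atomic_fetch_sub(&node->refs, nr);
}

/* Pre-charge a batch of refs onto the node, so handing one to a request
 * is a plain cached_refs-- under the lock instead of an atomic op. */
static void io_rsrc_refs_refill(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
{
	node->cached_refs += IO_RSRC_REF_BATCH;
	atomic_fetch_add(&node->refs, IO_RSRC_REF_BATCH);
}

/* Return any unused cached refs before the node is retired, mirroring
 * the reworked io_rsrc_refs_drop() above. */
static void io_rsrc_refs_drop(struct io_ring_ctx *ctx)
{
	struct io_rsrc_node *node = ctx->rsrc_node;

	if (node && node->cached_refs) {
		io_rsrc_put_node(node, node->cached_refs);
		node->cached_refs = 0;
	}
}

int main(void)
{
	struct io_rsrc_node node = { .refs = 1, .cached_refs = 0 };
	struct io_ring_ctx ctx = { .rsrc_node = &node };

	io_rsrc_refs_refill(&ctx, &node);	/* charge a batch */
	node.cached_refs--;			/* one request takes a cached ref */
	io_rsrc_refs_drop(&ctx);		/* flush the unused remainder */

	printf("refs=%d cached=%d\n", atomic_load(&node.refs), node.cached_refs);
	return 0;
}

The point of the cache is that the submission path, already serialized by ctx->uring_lock, hands out refs by decrementing a plain integer and only touches the shared counter once per IO_RSRC_REF_BATCH. Keeping that integer in the node it charges, rather than in the ctx, is the prep step the commit message describes for moving away from atomics later.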