author	Jens Axboe <axboe@kernel.dk>	2024-02-15 15:30:33 -0700
committer	Jens Axboe <axboe@kernel.dk>	2024-02-15 15:37:28 -0700
commit	b4ccc4dd1330a4d0db6aa4c6781631d1bab76c45
tree	9af256544b80381afafd99e2f5982da896148424
parent	871760eb7af57accc5402142154e64f21701fa16
io_uring/napi: enable even with a timeout of 0
1 usec is not as short as it used to be, and it makes sense to allow 0 as a busy poll timeout: it means we do just one loop to check whether anything is available. Add a separate ->napi_enabled flag to track whether napi has been enabled.

While at it, move the writing of the ctx napi values to after we've copied the old values back to userspace. This ensures that if the call fails, we're left in the same state as before, rather than in some indeterminate state.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
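As an illustration of the new semantics, here is a minimal userspace sketch (not part of this commit) that registers NAPI busy polling with a timeout of 0, which after this patch still enables busy polling and does a single poll pass per wait. It assumes liburing's io_uring_register_napi() wrapper and the uapi struct io_uring_napi layout are available; adjust for your liburing version.

/* sketch: register NAPI busy polling with busy_poll_to = 0
 * (assumes a liburing version that provides io_uring_register_napi) */
#include <stdio.h>
#include <string.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_napi napi;
	int ret;

	ret = io_uring_queue_init(8, &ring, 0);
	if (ret < 0) {
		fprintf(stderr, "queue_init: %s\n", strerror(-ret));
		return 1;
	}

	memset(&napi, 0, sizeof(napi));	/* pad[] and resv must be zero */
	napi.busy_poll_to = 0;		/* 0 usec: one busy-poll pass per wait */
	napi.prefer_busy_poll = 1;

	/* On success the kernel copies the previous settings back into napi. */
	ret = io_uring_register_napi(&ring, &napi);
	if (ret < 0)
		fprintf(stderr, "register_napi: %s\n", strerror(-ret));

	io_uring_queue_exit(&ring);
	return ret < 0 ? 1 : 0;
}

Busy polling only has an effect once sockets on NAPI-capable devices are driven through the ring; the sketch only shows the registration step.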
Diffstat (limited to 'io_uring')
-rw-r--r--	io_uring/napi.c	9
1 file changed, 5 insertions, 4 deletions
diff --git a/io_uring/napi.c b/io_uring/napi.c
index b234adda7dfd..883a1a665907 100644
--- a/io_uring/napi.c
+++ b/io_uring/napi.c
@@ -227,12 +227,12 @@ int io_register_napi(struct io_ring_ctx *ctx, void __user *arg)
 	if (napi.pad[0] || napi.pad[1] || napi.pad[2] || napi.resv)
 		return -EINVAL;
 
-	WRITE_ONCE(ctx->napi_busy_poll_to, napi.busy_poll_to);
-	WRITE_ONCE(ctx->napi_prefer_busy_poll, !!napi.prefer_busy_poll);
-
 	if (copy_to_user(arg, &curr, sizeof(curr)))
 		return -EFAULT;
 
+	WRITE_ONCE(ctx->napi_busy_poll_to, napi.busy_poll_to);
+	WRITE_ONCE(ctx->napi_prefer_busy_poll, !!napi.prefer_busy_poll);
+	WRITE_ONCE(ctx->napi_enabled, true);
 	return 0;
 }
@@ -256,6 +256,7 @@ int io_unregister_napi(struct io_ring_ctx *ctx, void __user *arg)
 
 	WRITE_ONCE(ctx->napi_busy_poll_to, 0);
 	WRITE_ONCE(ctx->napi_prefer_busy_poll, false);
+	WRITE_ONCE(ctx->napi_enabled, false);
 	return 0;
 }
@@ -300,7 +301,7 @@ void __io_napi_busy_loop(struct io_ring_ctx *ctx, struct io_wait_queue *iowq)
 {
 	iowq->napi_prefer_busy_poll = READ_ONCE(ctx->napi_prefer_busy_poll);
 
-	if (!(ctx->flags & IORING_SETUP_SQPOLL) && iowq->napi_busy_poll_to)
+	if (!(ctx->flags & IORING_SETUP_SQPOLL) && ctx->napi_enabled)
 		io_napi_blocking_busy_loop(ctx, iowq);
 }
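For reference, below is a rough, self-contained approximation (an assumption, not the kernel's exact code) of the budget check that gives a 0-usec timeout its "just one loop" behaviour described in the commit message: the busy loop re-checks the budget after each pass over the registered napi ids, and a zero budget reports itself as already expired on the first check, so exactly one pass is made before falling back to normal waiting.

#include <stdbool.h>
#include <time.h>

/* monotonic clock in microseconds */
static unsigned long now_usec(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000UL + ts.tv_nsec / 1000;
}

/*
 * Approximation of the busy-poll budget check: with a 0-usec budget the
 * very first check reports expiry, so the caller polls once and stops.
 */
static bool busy_poll_budget_expired(unsigned long start, unsigned long budget_usec)
{
	if (!budget_usec)
		return true;
	return now_usec() - start > budget_usec;
}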