author     Jens Axboe <axboe@kernel.dk>    2022-04-11 17:03:26 -0600
committer  Jens Axboe <axboe@kernel.dk>    2022-04-11 17:06:13 -0600
commit     2804ecd8d3e3730b4f999cc1ff4b2441e1f4d513 (patch)
tree       168ef66b171945071461f5319c7db7f105c96e1b /fs
parent     6f83ab22adcb77a5824d2c274dace0d99e21319f (diff)
io_uring: move apoll->events cache
In preparation for fixing a regression with pulling in an extra cacheline
for IO that doesn't usually touch the last cacheline of the io_kiocb,
move the cached location of apoll->events to space shared with some other
completion data. Like cflags, this isn't used until after the request
has been completed, so we can piggy back on top of comp_list.

Fixes: 81459350d581 ("io_uring: cache req->apoll->events in req->cflags")
Signed-off-by: Jens Axboe <axboe@kernel.dk>
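The layout trick is easier to see outside the kernel. The following is a minimal userspace sketch, not the kernel's actual definitions: struct request, struct work_node and the field names here are invented stand-ins for io_kiocb and io_wq_work_node. It shows why the union is safe when its two members are live in disjoint phases of a request's lifetime, and why caching the events value this way costs no extra space.

/*
 * Minimal userspace sketch (invented types, not the kernel layout):
 * the cached poll events are only needed before the request completes,
 * while the completion-batching list node is only needed afterwards,
 * so the two can share the same storage without growing the struct.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct work_node {			/* stand-in for io_wq_work_node */
	struct work_node *next;
};

struct request {			/* stand-in for io_kiocb */
	union {
		struct work_node comp_list;	/* live only after completion */
		int apoll_events;		/* live only before completion */
	};
	int result;
};

int main(void)
{
	struct request req = { .apoll_events = 0x1 };	/* e.g. cached POLLIN */

	/* Both union members share one offset, so the cache is "free". */
	assert(offsetof(struct request, comp_list) ==
	       offsetof(struct request, apoll_events));
	printf("sizeof(struct request) = %zu\n", sizeof(struct request));

	/* Before completion: poll handling consults the cached events. */
	printf("cached events = 0x%x\n", (unsigned)req.apoll_events);

	/* After completion: the same bytes are reused as a list link. */
	req.comp_list.next = NULL;
	return 0;
}

This mirrors the reasoning in the message above: comp_list is only linked once the request is being batched for completion or freeing, by which point the cached apoll->events value has already been consumed.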
Diffstat (limited to 'fs')
-rw-r--r--  fs/io_uring.c  |  21
1 file changed, 12 insertions(+), 9 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index b4a5e2a6aa9c..3a97535d0550 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -916,8 +916,12 @@ struct io_kiocb {
 	/* store used ubuf, so we can prevent reloading */
 	struct io_mapped_ubuf		*imu;
 
-	/* used by request caches, completion batching and iopoll */
-	struct io_wq_work_node		comp_list;
+	union {
+		/* used by request caches, completion batching and iopoll */
+		struct io_wq_work_node	comp_list;
+		/* cache ->apoll->events */
+		int apoll_events;
+	};
 	atomic_t			refs;
 	atomic_t			poll_refs;
 	struct io_task_work		io_task_work;
@@ -5833,7 +5837,6 @@ static void io_poll_remove_entries(struct io_kiocb *req)
 static int io_poll_check_events(struct io_kiocb *req, bool locked)
 {
 	struct io_ring_ctx *ctx = req->ctx;
-	struct io_poll_iocb *poll = io_poll_get_single(req);
 	int v;
 
 	/* req->task == current here, checking PF_EXITING is safe */
@@ -5850,17 +5853,17 @@ static int io_poll_check_events(struct io_kiocb *req, bool locked)
 			return -ECANCELED;
 
 		if (!req->result) {
-			struct poll_table_struct pt = { ._key = req->cflags };
+			struct poll_table_struct pt = { ._key = req->apoll_events };
 
 			if (unlikely(!io_assign_file(req, IO_URING_F_UNLOCKED)))
 				req->result = -EBADF;
 			else
-				req->result = vfs_poll(req->file, &pt) & req->cflags;
+				req->result = vfs_poll(req->file, &pt) & req->apoll_events;
 		}
 
 		/* multishot, just fill an CQE and proceed */
-		if (req->result && !(req->cflags & EPOLLONESHOT)) {
-			__poll_t mask = mangle_poll(req->result & poll->events);
+		if (req->result && !(req->apoll_events & EPOLLONESHOT)) {
+			__poll_t mask = mangle_poll(req->result & req->apoll_events);
 			bool filled;
 
 			spin_lock(&ctx->completion_lock);
@@ -5938,7 +5941,7 @@ static void __io_poll_execute(struct io_kiocb *req, int mask, int events)
 	 * CPU. We want to avoid pulling in req->apoll->events for that
 	 * case.
 	 */
-	req->cflags = events;
+	req->apoll_events = events;
 	if (req->opcode == IORING_OP_POLL_ADD)
 		req->io_task_work.func = io_poll_task_func;
 	else
@@ -6330,7 +6333,7 @@ static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
 		return -EINVAL;
 
 	io_req_set_refcount(req);
-	req->cflags = poll->events = io_poll_parse_events(sqe, flags);
+	req->apoll_events = poll->events = io_poll_parse_events(sqe, flags);
 	return 0;
 }
 
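The commit message frames the win in terms of cachelines rather than bytes. A hedged way to audit that kind of layout concern in userspace, with an invented struct and an assumed 64-byte cacheline, is to pin the field's offset down with a build-time assertion; tools such as pahole are what you would point at the real io_kiocb.

/*
 * Hedged sketch, not kernel code: check at build time which cacheline
 * a hot field lands on, so a layout change like the union above can be
 * shown not to drag an extra 64-byte line into the request's hot path.
 */
#include <stddef.h>
#include <stdio.h>

#define CACHELINE_SIZE 64		/* assumption: 64-byte cachelines */

struct work_node {
	struct work_node *next;
};

struct request {			/* invented layout, not io_kiocb */
	char hot_fields[48];		/* opcode, flags, file pointer, ... */
	union {
		struct work_node comp_list;
		int apoll_events;
	};
	char cold_fields[128];		/* rarely touched completion state */
};

/* Fail the build if the cached events spill past the first cacheline. */
_Static_assert(offsetof(struct request, apoll_events) + sizeof(int)
	       <= CACHELINE_SIZE, "apoll_events left the hot cacheline");

int main(void)
{
	printf("apoll_events sits on cacheline %zu\n",
	       offsetof(struct request, apoll_events) / CACHELINE_SIZE);
	return 0;
}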