author	Jan Kara <jack@suse.cz>	2021-11-25 14:36:34 +0100
committer	Jens Axboe <axboe@kernel.dk>	2021-11-29 06:38:51 -0700
commit	790cf9c84837b232eb413b8b6b5d57817176cb23 (patch)
tree	42b0a8f5652c5e17e4faf6a61f750ffe157f10e1 /block
parent	639d353143fa3bfa81bbe7af263260d93d23d822 (diff)
block: Provide blk_mq_sched_get_icq()
Currently we look up the ICQ only after the request is allocated. However, BFQ
will want to decide how many scheduler tags it allows a given bfq queue
(effectively a process) to consume based on cgroup weight. So provide a
function blk_mq_sched_get_icq() so that BFQ can look up the ICQ earlier.

Acked-by: Paolo Valente <paolo.valente@linaro.org>
Signed-off-by: Jan Kara <jack@suse.cz>
Link: https://lore.kernel.org/r/20211125133645.27483-1-jack@suse.cz
Signed-off-by: Jens Axboe <axboe@kernel.dk>
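To illustrate the intended use (not part of this patch): a rough sketch of how a BFQ-style scheduler could call the new helper before any request or tag exists, for example when deciding how deep the current process may dig into the scheduler tags. The function example_depth_for_current() and its default_depth parameter are hypothetical; only blk_mq_sched_get_icq(), struct io_cq and struct request_queue come from this patch.

/*
 * Hypothetical sketch, not part of this commit: a BFQ-like scheduler
 * could look up the submitting task's icq before a request is allocated
 * and derive a per-process tag depth from it (e.g. from cgroup weight).
 * Assumes it lives under block/ next to the patched files.
 */
#include "blk-mq-sched.h"

static unsigned int example_depth_for_current(struct request_queue *q,
					      unsigned int default_depth)
{
	struct io_cq *icq;

	/* Find or create current's icq for this queue; no struct request
	 * is needed yet, which is what the new helper enables. */
	icq = blk_mq_sched_get_icq(q);
	if (!icq)
		return default_depth;	/* no io_context, keep the default */

	/*
	 * A real scheduler would map icq to its private per-process state
	 * (BFQ: icq_to_bic()/bic_to_bfqq()) and scale the depth by the
	 * cgroup weight; this sketch just keeps the default depth.
	 */
	return default_depth;
}

The point of the split below is that such a hook runs before the request exists, so rq->elv.icq is not yet available and the lookup can no longer live only inside blk_mq_sched_assign_ioc().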
Diffstat (limited to 'block')
-rw-r--r--	block/blk-mq-sched.c	26
-rw-r--r--	block/blk-mq-sched.h	1
2 files changed, 16 insertions, 11 deletions
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index b942b38000e5..98c6a97729f2 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -18,9 +18,8 @@
 #include "blk-mq-tag.h"
 #include "blk-wbt.h"
 
-void blk_mq_sched_assign_ioc(struct request *rq)
+struct io_cq *blk_mq_sched_get_icq(struct request_queue *q)
 {
-	struct request_queue *q = rq->q;
 	struct io_context *ioc;
 	struct io_cq *icq;
 
@@ -28,22 +27,27 @@ void blk_mq_sched_assign_ioc(struct request *rq)
 	if (unlikely(!current->io_context))
 		create_task_io_context(current, GFP_ATOMIC, q->node);
 
-	/*
-	 * May not have an IO context if it's a passthrough request
-	 */
+	/* May not have an IO context if context creation failed */
 	ioc = current->io_context;
 	if (!ioc)
-		return;
+		return NULL;
 
 	spin_lock_irq(&q->queue_lock);
 	icq = ioc_lookup_icq(ioc, q);
 	spin_unlock_irq(&q->queue_lock);
+	if (icq)
+		return icq;
+	return ioc_create_icq(ioc, q, GFP_ATOMIC);
+}
+EXPORT_SYMBOL(blk_mq_sched_get_icq);
 
-	if (!icq) {
-		icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
-		if (!icq)
-			return;
-	}
+void blk_mq_sched_assign_ioc(struct request *rq)
+{
+	struct io_cq *icq;
+
+	icq = blk_mq_sched_get_icq(rq->q);
+	if (!icq)
+		return;
 	get_io_context(icq->ioc);
 	rq->elv.icq = icq;
 }
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index 25d1034952b6..add651ec06da 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -8,6 +8,7 @@
 
 #define MAX_SCHED_RQ	(16 * BLKDEV_DEFAULT_RQ)
 
+struct io_cq *blk_mq_sched_get_icq(struct request_queue *q);
 void blk_mq_sched_assign_ioc(struct request *rq);
 
 bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,