author     Jens Axboe <axboe@kernel.dk>    2021-10-15 09:44:38 -0600
committer  Jens Axboe <axboe@kernel.dk>    2021-10-18 08:51:52 -0600
commit     2ff0682da6e09c1e0db63a2d2abcd4efb531c8db (patch)
tree       33c4b58cfd5f1e64981aa654043081e531d0e4fe /block/blk-mq-sched.h
parent     90b8faa0e8de1b02b619fb33f6c6e1e13e7d1d70 (diff)
block: store elevator state in request
Add an rq private RQF_ELV flag, which tells the block layer that this request was initialized on a queue that has an IO scheduler attached. This allows for faster checking in the fast path, rather than having to dereference rq->q later on.

Elevator switching does full quiesce of the queue before detaching an IO scheduler, so it's safe to cache this in the request itself.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
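To make the idea in the commit message concrete, here is a minimal userspace sketch (not kernel code) of the same pattern: capture a queue-level property in a per-request flag once at initialization, so the hot path tests a flag it already holds instead of dereferencing the queue pointer. All identifiers below (the sketch_* names and REQ_SKETCH_ELV) are invented for illustration and are not the kernel's API.

/*
 * Sketch of caching a queue property in a per-request flag.
 * Hypothetical names throughout; only illustrates the technique.
 */
#include <stdbool.h>
#include <stdio.h>

#define REQ_SKETCH_ELV	(1u << 0)	/* queue had a scheduler when the request was set up */

struct sketch_queue {
	const char *sched_name;		/* NULL means no IO scheduler attached */
};

struct sketch_request {
	struct sketch_queue *q;		/* kept for completeness; hot path avoids touching it */
	unsigned int flags;
};

/* Init path, run once per request: snapshot the queue state into the request. */
static void sketch_init_request(struct sketch_request *rq, struct sketch_queue *q)
{
	rq->q = q;
	rq->flags = q->sched_name ? REQ_SKETCH_ELV : 0;
}

/* Hot path: a single flag test on memory already in hand, no rq->q dereference. */
static bool sketch_needs_elevator(const struct sketch_request *rq)
{
	return rq->flags & REQ_SKETCH_ELV;
}

int main(void)
{
	struct sketch_queue q_sched = { .sched_name = "mq-deadline" };
	struct sketch_queue q_none = { .sched_name = NULL };
	struct sketch_request a, b;

	sketch_init_request(&a, &q_sched);
	sketch_init_request(&b, &q_none);

	printf("a needs elevator hooks: %d\n", sketch_needs_elevator(&a));
	printf("b needs elevator hooks: %d\n", sketch_needs_elevator(&b));
	return 0;
}

As the commit message notes, caching like this is only safe because the queue is fully quiesced before an IO scheduler is detached, so the cached flag cannot go stale while the request is in flight.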
Diffstat (limited to 'block/blk-mq-sched.h')
-rw-r--r--  block/blk-mq-sched.h  27
1 file changed, 16 insertions(+), 11 deletions(-)
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index fe252278ed9a..98836106b25f 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -56,29 +56,34 @@ static inline bool
 blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
			 struct bio *bio)
 {
-	struct elevator_queue *e = q->elevator;
-
-	if (e && e->type->ops.allow_merge)
-		return e->type->ops.allow_merge(q, rq, bio);
+	if (rq->rq_flags & RQF_ELV) {
+		struct elevator_queue *e = q->elevator;
+
+		if (e->type->ops.allow_merge)
+			return e->type->ops.allow_merge(q, rq, bio);
+	}
 	return true;
 }
 
 static inline void blk_mq_sched_completed_request(struct request *rq, u64 now)
 {
-	struct elevator_queue *e = rq->q->elevator;
+	if (rq->rq_flags & RQF_ELV) {
+		struct elevator_queue *e = rq->q->elevator;
 
-	if (e && e->type->ops.completed_request)
-		e->type->ops.completed_request(rq, now);
+		if (e->type->ops.completed_request)
+			e->type->ops.completed_request(rq, now);
+	}
 }
 
 static inline void blk_mq_sched_requeue_request(struct request *rq)
 {
-	struct request_queue *q = rq->q;
-	struct elevator_queue *e = q->elevator;
+	if (rq->rq_flags & RQF_ELV) {
+		struct request_queue *q = rq->q;
+		struct elevator_queue *e = q->elevator;
 
-	if ((rq->rq_flags & RQF_ELVPRIV) && e && e->type->ops.requeue_request)
-		e->type->ops.requeue_request(rq);
+		if ((rq->rq_flags & RQF_ELVPRIV) && e->type->ops.requeue_request)
+			e->type->ops.requeue_request(rq);
+	}
 }
 
 static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)