author    John Garry <john.garry@huawei.com>	2021-12-06 20:49:48 +0800
committer Jens Axboe <axboe@kernel.dk>	2021-12-06 13:18:47 -0700
commit    8ab30a331946c34e4ba022c44df8624acea1c74e (patch)
tree      dcebc49ca178812b772eafb75c9dcfebba62a24b /block/blk-mq.c
parent    73f3760eddc9bc32c207fff06537f98f94bef451 (diff)
blk-mq: Drop busy_iter_fn blk_mq_hw_ctx argument
The only user of the busy_iter_fn blk_mq_hw_ctx argument is blk_mq_rq_inflight().

Function blk_mq_rq_inflight() uses the hctx to find the associated request
queue to match against the request. However, this same check is already done
in its caller, bt_iter(), so drop the check here. With that change there are
no more users of the busy_iter_fn blk_mq_hw_ctx argument, so drop the argument.

Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: John Garry <john.garry@huawei.com>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Tested-by: Kashyap Desai <kashyap.desai@broadcom.com>
Link: https://lore.kernel.org/r/1638794990-137490-2-git-send-email-john.garry@huawei.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
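For reference, busy_iter_fn is the callback type used by blk_mq_queue_tag_busy_iter(); a rough before/after sketch of its declaration in include/linux/blk-mq.h, reconstructed for illustration and not part of the diff below:

	/* sketch only, reconstructed: busy_iter_fn before this patch,
	 * with the blk_mq_hw_ctx argument still present */
	typedef bool (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *,
				    void *, bool);

	/* sketch only, reconstructed: busy_iter_fn after this patch,
	 * the hctx argument dropped */
	typedef bool (busy_iter_fn)(struct request *, void *, bool);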
Diffstat (limited to 'block/blk-mq.c')
 block/blk-mq.c | 17 ++++++++---------
 1 file changed, 8 insertions(+), 9 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 0bf3523dd1f5..103c0f58853c 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -127,8 +127,7 @@ struct mq_inflight {
 	unsigned int inflight[2];
 };
 
-static bool blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
-				  struct request *rq, void *priv,
+static bool blk_mq_check_inflight(struct request *rq, void *priv,
 				  bool reserved)
 {
 	struct mq_inflight *mi = priv;
@@ -1308,14 +1307,15 @@ void blk_mq_delay_kick_requeue_list(struct request_queue *q,
 }
 EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
 
-static bool blk_mq_rq_inflight(struct blk_mq_hw_ctx *hctx, struct request *rq,
-			       void *priv, bool reserved)
+static bool blk_mq_rq_inflight(struct request *rq, void *priv,
+			       bool reserved)
 {
 	/*
-	 * If we find a request that isn't idle and the queue matches,
-	 * we know the queue is busy. Return false to stop the iteration.
+	 * If we find a request that isn't idle we know the queue is busy
+	 * as it's checked in the iter.
+	 * Return false to stop the iteration.
 	 */
-	if (blk_mq_request_started(rq) && rq->q == hctx->queue) {
+	if (blk_mq_request_started(rq)) {
 		bool *busy = priv;
 
 		*busy = true;
@@ -1377,8 +1377,7 @@ void blk_mq_put_rq_ref(struct request *rq)
 		__blk_mq_free_request(rq);
 }
 
-static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
-				 struct request *rq, void *priv, bool reserved)
+static bool blk_mq_check_expired(struct request *rq, void *priv, bool reserved)
 {
 	unsigned long *next = priv;
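The "checked in the iter" wording in the new blk_mq_rq_inflight() comment refers to the caller-side filtering in bt_iter() (block/blk-mq-tag.c). A simplified sketch of that check, reconstructed from memory for illustration rather than taken from this patch:

	/*
	 * Simplified sketch of bt_iter(), not part of this diff: the request
	 * is only handed to the busy_iter_fn callback after the iterator has
	 * confirmed it belongs to this hctx's queue, which is why
	 * blk_mq_rq_inflight() no longer needs its own rq->q == hctx->queue
	 * test.
	 */
	static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
	{
		struct bt_iter_data *iter_data = data;
		struct blk_mq_hw_ctx *hctx = iter_data->hctx;
		struct request *rq;
		bool ret = true;

		/* may return NULL if the tag bit is set before ->rqs[] is assigned */
		rq = blk_mq_find_and_get_req(hctx->tags, bitnr);
		if (!rq)
			return true;

		/* caller-side check: only requests for this queue/hctx reach the callback */
		if (rq->q == hctx->queue && rq->mq_hctx == hctx)
			ret = iter_data->fn(rq, iter_data->data, iter_data->reserved);
		blk_mq_put_rq_ref(rq);
		return ret;
	}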