From 0effb390c4bac1a484f0ca6ad3f1d183fcde882b Mon Sep 17 00:00:00 2001
From: Bart Van Assche
Date: Wed, 17 May 2023 10:42:28 -0700
Subject: block: mq-deadline: Handle requeued requests correctly

Start dispatching from the start of a zone instead of from the starting
position of the most recently dispatched request.

If a zoned write is requeued with an LBA that is lower than already
inserted zoned writes, make sure that it is submitted first.

Reviewed-by: Christoph Hellwig
Reviewed-by: Hannes Reinecke
Cc: Damien Le Moal
Cc: Ming Lei
Signed-off-by: Bart Van Assche
Reviewed-by: Damien Le Moal
Link: https://lore.kernel.org/r/20230517174230.897144-11-bvanassche@acm.org
Signed-off-by: Jens Axboe
---
 block/mq-deadline.c | 34 ++++++++++++++++++++++++++++++++--
 1 file changed, 32 insertions(+), 2 deletions(-)

diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index 91b689261d30..e90879869c90 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -156,13 +156,28 @@ deadline_latter_request(struct request *rq)
 	return NULL;
 }
 
-/* Return the first request for which blk_rq_pos() >= pos. */
+/*
+ * Return the first request for which blk_rq_pos() >= @pos. For zoned devices,
+ * return the first request after the start of the zone containing @pos.
+ */
 static inline struct request *deadline_from_pos(struct dd_per_prio *per_prio,
 				enum dd_data_dir data_dir, sector_t pos)
 {
 	struct rb_node *node = per_prio->sort_list[data_dir].rb_node;
 	struct request *rq, *res = NULL;
 
+	if (!node)
+		return NULL;
+
+	rq = rb_entry_rq(node);
+	/*
+	 * A zoned write may have been requeued with a starting position that
+	 * is below that of the most recently dispatched request. Hence, for
+	 * zoned writes, start searching from the start of a zone.
+	 */
+	if (blk_rq_is_seq_zoned_write(rq))
+		pos = round_down(pos, rq->q->limits.chunk_sectors);
+
 	while (node) {
 		rq = rb_entry_rq(node);
 		if (blk_rq_pos(rq) >= pos) {
@@ -806,6 +821,8 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 		list_add(&rq->queuelist, &per_prio->dispatch);
 		rq->fifo_time = jiffies;
 	} else {
+		struct list_head *insert_before;
+
 		deadline_add_rq_rb(per_prio, rq);
 
 		if (rq_mergeable(rq)) {
@@ -818,7 +835,20 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 		 * set expire time and add to fifo list
 		 */
 		rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
-		list_add_tail(&rq->queuelist, &per_prio->fifo_list[data_dir]);
+		insert_before = &per_prio->fifo_list[data_dir];
+#ifdef CONFIG_BLK_DEV_ZONED
+		/*
+		 * Insert zoned writes such that requests are sorted by
+		 * position per zone.
+		 */
+		if (blk_rq_is_seq_zoned_write(rq)) {
+			struct request *rq2 = deadline_latter_request(rq);
+
+			if (rq2 && blk_rq_zone_no(rq2) == blk_rq_zone_no(rq))
+				insert_before = &rq2->queuelist;
+		}
+#endif
+		list_add_tail(&rq->queuelist, insert_before);
 	}
 }
 
--
cgit
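
Note on the zone-start computation in deadline_from_pos(): for zoned block
devices, q->limits.chunk_sectors holds the zone size in sectors, and zone
sizes are a power of two, so round_down(pos, chunk_sectors) yields the start
LBA of the zone containing pos. What follows is a minimal standalone C sketch
of that arithmetic, not kernel code; the zone size and positions are made-up
example values.

#include <assert.h>

typedef unsigned long long sector_t;

/* Same semantics as the kernel's round_down() for a power-of-two @align. */
static sector_t round_down_pow2(sector_t pos, sector_t align)
{
	return pos & ~(align - 1);
}

int main(void)
{
	const sector_t zone_size = 524288;	/* example: 256 MiB in sectors */
	sector_t pos = 3 * zone_size + 1000;	/* somewhere inside zone 3 */

	/* The rb-tree search restarts at the start of zone 3, not at pos. */
	assert(round_down_pow2(pos, zone_size) == 3 * zone_size);
	return 0;
}

Restarting from the zone start is what lets a requeued write with a lower LBA
be found again, instead of being skipped because the search began at the
position of the most recently dispatched request.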
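
Note on the FIFO insertion rule in dd_insert_request(): the patch keeps zoned
writes sorted by position within each zone on the FIFO list, so a requeued
write with a lower LBA is dispatched before later writes for the same zone.
The sketch below illustrates that invariant with a plain singly linked list
and a linear scan; the kernel code instead consults only
deadline_latter_request(), the rb-tree successor of the new request, so this
is a simplification of the idea, not the mq-deadline implementation.

#include <stdio.h>

struct req {
	unsigned int zone;	/* zone number */
	unsigned long long pos;	/* start LBA */
	struct req *next;
};

/*
 * Insert @rq before the first queued request in the same zone whose start
 * LBA is higher; otherwise append at the tail, which is the normal FIFO
 * behaviour for all other requests.
 */
static void fifo_insert(struct req **head, struct req *rq)
{
	struct req **pp = head;

	while (*pp && !((*pp)->zone == rq->zone && (*pp)->pos > rq->pos))
		pp = &(*pp)->next;
	rq->next = *pp;
	*pp = rq;
}

int main(void)
{
	struct req a = { .zone = 7, .pos = 100 };
	struct req b = { .zone = 7, .pos = 200 };
	struct req requeued = { .zone = 7, .pos = 50 };
	struct req *head = NULL;

	fifo_insert(&head, &a);
	fifo_insert(&head, &b);
	fifo_insert(&head, &requeued);	/* lands in front of a and b */

	for (struct req *r = head; r; r = r->next)
		printf("zone %u pos %llu\n", r->zone, r->pos);
	return 0;
}

Printing the list yields positions 50, 100, 200 for zone 7: the requeued
write ends up first, which is the ordering a sequential-write-required zone
demands.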