diff options
-rw-r--r-- | block/blk-mq.c | 39 |
1 file changed, 27 insertions(+), 12 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c index 80c20700bce8..299aac458185 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -4936,25 +4936,40 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr) blk_mq_quiesce_queue(q); if (blk_mq_is_shared_tags(set->flags)) { + /* + * Shared tags, for sched tags, we allocate max initially hence + * tags can't grow, see blk_mq_alloc_sched_tags(). + */ if (q->elevator) blk_mq_tag_update_sched_shared_tags(q); else blk_mq_tag_resize_shared_tags(set, nr); - } else { + } else if (!q->elevator) { + /* + * Non-shared hardware tags, nr is already checked from + * queue_requests_store() and tags can't grow. + */ queue_for_each_hw_ctx(q, hctx, i) { if (!hctx->tags) continue; - /* - * If we're using an MQ scheduler, just update the - * scheduler queue depth. This is similar to what the - * old code would do. - */ - if (hctx->sched_tags) - ret = blk_mq_tag_update_depth(hctx, - &hctx->sched_tags, nr); - else - ret = blk_mq_tag_update_depth(hctx, - &hctx->tags, nr); + sbitmap_queue_resize(&hctx->tags->bitmap_tags, + nr - hctx->tags->nr_reserved_tags); + } + } else if (nr <= q->elevator->et->nr_requests) { + /* Non-shared sched tags, and tags don't grow. */ + queue_for_each_hw_ctx(q, hctx, i) { + if (!hctx->sched_tags) + continue; + sbitmap_queue_resize(&hctx->sched_tags->bitmap_tags, + nr - hctx->sched_tags->nr_reserved_tags); + } + } else { + /* Non-shared sched tags, and tags grow */ + queue_for_each_hw_ctx(q, hctx, i) { + if (!hctx->sched_tags) + continue; + ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags, + nr); if (ret) goto out; } |