author    Linus Torvalds <torvalds@linux-foundation.org>    2021-11-09 11:20:07 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>    2021-11-09 11:20:07 -0800
commit    3e28850cbd359bed841b832200f9fc208a9ef040 (patch)
tree      ef4e5b294f934f58fc08feb89d24291b71c01d4a /block/blk-core.c
parent    1dc1f92e24d6a5479ae8ceea3e2fac69f8d9dab7 (diff)
parent    26af1cd00364ce20dbec66b93ef42f9d42dc6953 (diff)
Merge tag 'for-5.16/block-2021-11-09' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:

 - Set of fixes for the batched tag allocation (Ming, me)

 - add_disk() error handling fix (Luis)

 - Nested queue quiesce fixes (Ming)

 - Shared tags init error handling fix (Ye)

 - Misc cleanups (Jean, Ming, me)

* tag 'for-5.16/block-2021-11-09' of git://git.kernel.dk/linux-block:
  nvme: wait until quiesce is done
  scsi: make sure that request queue queiesce and unquiesce balanced
  scsi: avoid to quiesce sdev->request_queue two times
  blk-mq: add one API for waiting until quiesce is done
  blk-mq: don't free tags if the tag_set is used by other device in queue initialztion
  block: fix device_add_disk() kobject_create_and_add() error handling
  block: ensure cached plug request matches the current queue
  block: move queue enter logic into blk_mq_submit_bio()
  block: make bio_queue_enter() fast-path available inline
  block: split request allocation components into helpers
  block: have plug stored requests hold references to the queue
  blk-mq: update hctx->nr_active in blk_mq_end_request_batch()
  blk-mq: add RQF_ELV debug entry
  blk-mq: only try to run plug merge if request has same queue with incoming bio
  block: move RQF_ELV setting into allocators
  dm: don't stop request queue after the dm device is suspended
  block: replace always false argument with 'false'
  block: assign correct tag before doing prefetch of request
  blk-mq: fix redundant check of !e expression
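Of the fixes above, the nested quiesce work hinges on the new helper
blk_mq_wait_quiesce_done(), which splits "mark the queue quiesced" from
"wait for the grace period". A minimal sketch of the resulting pattern,
modeled on the nvme change in this pull (the ctrl/namespace walk below is
illustrative, not the literal driver code):

	/* Pass 1: mark every queue quiesced; none of these calls block. */
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_quiesce_queue_nowait(ns->queue);

	/* Pass 2: wait once per queue for the RCU/SRCU grace period, so
	 * the waits overlap instead of serializing one full grace period
	 * per namespace. */
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_wait_quiesce_done(ns->queue);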
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--   block/blk-core.c   61
1 file changed, 21 insertions(+), 40 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index ac1de7d73a45..b043de2baaac 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -386,30 +386,6 @@ void blk_cleanup_queue(struct request_queue *q)
}
EXPORT_SYMBOL(blk_cleanup_queue);
-static bool blk_try_enter_queue(struct request_queue *q, bool pm)
-{
- rcu_read_lock();
- if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))
- goto fail;
-
- /*
- * The code that increments the pm_only counter must ensure that the
- * counter is globally visible before the queue is unfrozen.
- */
- if (blk_queue_pm_only(q) &&
- (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
- goto fail_put;
-
- rcu_read_unlock();
- return true;
-
-fail_put:
- blk_queue_exit(q);
-fail:
- rcu_read_unlock();
- return false;
-}
-
/**
* blk_queue_enter() - try to increase q->q_usage_counter
* @q: request queue pointer
@@ -442,10 +418,8 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
return 0;
}
-static inline int bio_queue_enter(struct bio *bio)
+int __bio_queue_enter(struct request_queue *q, struct bio *bio)
{
- struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-
while (!blk_try_enter_queue(q, false)) {
struct gendisk *disk = bio->bi_bdev->bd_disk;
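The two hunks above are the "make bio_queue_enter() fast-path available
inline" change: the old static helper becomes the out-of-line slow path
__bio_queue_enter(), while blk_try_enter_queue() and a small inline
wrapper move to block/blk.h. A sketch of the wrapper's likely shape,
reconstructed for reference rather than taken from this file's diff:

	static inline int bio_queue_enter(struct bio *bio)
	{
		struct request_queue *q = bdev_get_queue(bio->bi_bdev);

		/* Hot path: take the percpu ref without a function call. */
		if (blk_try_enter_queue(q, false))
			return 0;
		/* Slow path: queue frozen or pm-only; may sleep and retry. */
		return __bio_queue_enter(q, bio);
	}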
@@ -742,7 +716,7 @@ static inline blk_status_t blk_check_zone_append(struct request_queue *q,
return BLK_STS_OK;
}
-static noinline_for_stack bool submit_bio_checks(struct bio *bio)
+noinline_for_stack bool submit_bio_checks(struct bio *bio)
{
struct block_device *bdev = bio->bi_bdev;
struct request_queue *q = bdev_get_queue(bdev);
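Dropping "static" here lets the blk-mq submission path run the bio checks
itself now that queue-enter logic has moved into blk_mq_submit_bio(); the
matching declaration presumably lands in block/blk.h (shape assumed):

	bool submit_bio_checks(struct bio *bio);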
@@ -860,22 +834,23 @@ end_io:
return false;
}
-static void __submit_bio(struct bio *bio)
+static void __submit_bio_fops(struct gendisk *disk, struct bio *bio)
{
- struct gendisk *disk = bio->bi_bdev->bd_disk;
-
if (unlikely(bio_queue_enter(bio) != 0))
return;
+ if (submit_bio_checks(bio) && blk_crypto_bio_prep(&bio))
+ disk->fops->submit_bio(bio);
+ blk_queue_exit(disk->queue);
+}
- if (!submit_bio_checks(bio) || !blk_crypto_bio_prep(&bio))
- goto queue_exit;
- if (!disk->fops->submit_bio) {
+static void __submit_bio(struct bio *bio)
+{
+ struct gendisk *disk = bio->bi_bdev->bd_disk;
+
+ if (!disk->fops->submit_bio)
blk_mq_submit_bio(bio);
- return;
- }
- disk->fops->submit_bio(bio);
-queue_exit:
- blk_queue_exit(disk->queue);
+ else
+ __submit_bio_fops(disk, bio);
}
/*
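After this split, only bio-based drivers (those with ->submit_bio set)
take the queue reference here; blk-mq now enters the queue inside
blk_mq_submit_bio() so it can consume plug-cached requests that already
hold one. A hedged sketch of that counterpart logic (assumed shape, tying
together the "ensure cached plug request matches the current queue" and
"move queue enter logic into blk_mq_submit_bio()" commits in this pull):

	/* Inside blk_mq_submit_bio(), roughly: */
	rq = plug ? rq_list_peek(&plug->cached_rq) : NULL;
	if (rq && rq->q == q) {
		/* Reuse the cached request; it already holds a queue ref. */
	} else {
		if (unlikely(bio_queue_enter(bio)))
			return;
		/* ... allocate a fresh request ... */
	}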
@@ -1615,7 +1590,13 @@ void blk_flush_plug(struct blk_plug *plug, bool from_schedule)
flush_plug_callbacks(plug, from_schedule);
if (!rq_list_empty(plug->mq_list))
blk_mq_flush_plug_list(plug, from_schedule);
- if (unlikely(!from_schedule && plug->cached_rq))
+ /*
+ * Unconditionally flush out cached requests, even if the unplug
+ * event came from schedule. Since we now hold references to the
+ * queue for cached requests, we don't want a blocked task holding
+ * up a queue freeze/quiesce event.
+ */
+ if (unlikely(!rq_list_empty(plug->cached_rq)))
blk_mq_free_plug_rqs(plug);
}