author    Jens Axboe <axboe@kernel.dk>  2021-11-04 12:45:51 -0600
committer Jens Axboe <axboe@kernel.dk>  2021-11-04 12:54:33 -0600
commit    c98cb5bbdab10d187aff9b4e386210eb2332af96 (patch)
tree      b1cbcd39a591ee32896da9146abb490fa34d9183 /block
parent    71539717c10521114403d27e171c9cbe35dcd900 (diff)
block: make bio_queue_enter() fast-path available inline
Just a prep patch for shifting the queue enter logic. This moves the
expected fast path inline, and leaves __bio_queue_enter() as an
out-of-line function call. We don't want to inline the latter, as it's
mostly slow path code.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
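The split being made here is a common pattern: keep the cheap, usually
successful check in a header so it can be inlined into every caller, and
push the rare retry/wait logic into an out-of-line function in a .c file.
The sketch below is illustrative only; struct queue, queue_try_enter(),
__queue_enter_slow() and the busy-wait are stand-ins I've invented for the
example, not the kernel's implementation (the real fast path uses
percpu_ref_tryget_live_rcu() on q->q_usage_counter, and the real slow path
sleeps rather than spinning).

	/* Hypothetical, self-contained sketch of the inline fast path /
	 * out-of-line slow path split; not the kernel code.
	 */
	#include <sched.h>
	#include <stdatomic.h>
	#include <stdbool.h>

	struct queue {
		atomic_int frozen;	/* nonzero while the queue is draining */
		atomic_int users;	/* active references, like q_usage_counter */
	};

	/* Fast path: one increment plus a re-check, cheap enough to inline. */
	static inline bool queue_try_enter(struct queue *q)
	{
		atomic_fetch_add(&q->users, 1);
		if (!atomic_load(&q->frozen))
			return true;
		atomic_fetch_sub(&q->users, 1);	/* lost the race, back out */
		return false;
	}

	/* Slow path: kept out of line so the retry loop doesn't bloat callers. */
	static bool __queue_enter_slow(struct queue *q)
	{
		while (!queue_try_enter(q))
			sched_yield();	/* the kernel sleeps here instead */
		return true;
	}

	/* What callers use: inline check first, function call only on failure. */
	static inline bool queue_enter(struct queue *q)
	{
		if (queue_try_enter(q))
			return true;
		return __queue_enter_slow(q);
	}

The payoff is that the common case costs no function call at all, while the
uncommon case pays one call into code that is shared by every caller, which
is exactly what the blk.h/blk-core.c hunks below do for bio_queue_enter().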
Diffstat (limited to 'block')
-rw-r--r--  block/blk-core.c  28
-rw-r--r--  block/blk.h       34
2 files changed, 35 insertions(+), 27 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 35a87c06276e..9ca3ddd154d4 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -386,30 +386,6 @@ void blk_cleanup_queue(struct request_queue *q)
}
EXPORT_SYMBOL(blk_cleanup_queue);
-static bool blk_try_enter_queue(struct request_queue *q, bool pm)
-{
- rcu_read_lock();
- if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))
- goto fail;
-
- /*
- * The code that increments the pm_only counter must ensure that the
- * counter is globally visible before the queue is unfrozen.
- */
- if (blk_queue_pm_only(q) &&
- (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
- goto fail_put;
-
- rcu_read_unlock();
- return true;
-
-fail_put:
- blk_queue_exit(q);
-fail:
- rcu_read_unlock();
- return false;
-}
-
/**
* blk_queue_enter() - try to increase q->q_usage_counter
* @q: request queue pointer
@@ -442,10 +418,8 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
return 0;
}
-static inline int bio_queue_enter(struct bio *bio)
+int __bio_queue_enter(struct request_queue *q, struct bio *bio)
{
- struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-
while (!blk_try_enter_queue(q, false)) {
struct gendisk *disk = bio->bi_bdev->bd_disk;
diff --git a/block/blk.h b/block/blk.h
index 7afffd548daf..814d9632d43e 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -55,6 +55,40 @@ void blk_free_flush_queue(struct blk_flush_queue *q);
void blk_freeze_queue(struct request_queue *q);
void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
void blk_queue_start_drain(struct request_queue *q);
+int __bio_queue_enter(struct request_queue *q, struct bio *bio);
+
+static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)
+{
+ rcu_read_lock();
+ if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))
+ goto fail;
+
+ /*
+ * The code that increments the pm_only counter must ensure that the
+ * counter is globally visible before the queue is unfrozen.
+ */
+ if (blk_queue_pm_only(q) &&
+ (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
+ goto fail_put;
+
+ rcu_read_unlock();
+ return true;
+
+fail_put:
+ blk_queue_exit(q);
+fail:
+ rcu_read_unlock();
+ return false;
+}
+
+static inline int bio_queue_enter(struct bio *bio)
+{
+ struct request_queue *q = bdev_get_queue(bio->bi_bdev);
+
+ if (blk_try_enter_queue(q, false))
+ return 0;
+ return __bio_queue_enter(q, bio);
+}
#define BIO_INLINE_VECS 4
struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,