author     Christoph Hellwig <hch@lst.de>    2020-04-25 09:53:36 +0200
committer  Jens Axboe <axboe@kernel.dk>      2020-04-25 09:45:44 -0600
commit     8cf7961dab42c9177a556b719c15f5b9449c24d1 (patch)
tree       cf0e7e5d0b7f6f0ee665629b61fc91cc56ed77c7 /block/blk-core.c
parent     ae3cc8d8ff061d3ffca96665685550e70a86472a (diff)
block: bypass ->make_request_fn for blk-mq drivers
Call blk_mq_make_request when no ->make_request_fn is set. This is safe
now that blk_alloc_queue always sets up the pointer for make_request
based drivers. This avoids an indirect call in the blk-mq driver I/O
fast path, which is rather expensive due to spectre mitigations.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
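[Editor's note: the point of the change is replacing an indirect call with a
direct one on the hot path. A minimal standalone sketch of that dispatch
pattern (plain userspace C, not kernel code; the queue struct, submit(),
mq_make_request() and legacy_make_request() names are made up for
illustration):]

/*
 * Illustrative only: mimics the new generic_make_request() dispatch.
 * The common case (no ->make_request_fn, i.e. a blk-mq style queue) is
 * handled with a direct call; the function pointer is only dereferenced
 * for legacy make_request based drivers, so the hot path avoids an
 * indirect call that retpoline/spectre mitigations make expensive.
 */
#include <stdio.h>

struct queue {
	/* NULL for the common (blk-mq style) case in this sketch */
	int (*make_request_fn)(struct queue *q, int bio);
};

/* Stand-in for blk_mq_make_request(): the fast path, called directly. */
static int mq_make_request(struct queue *q, int bio)
{
	(void)q;
	printf("direct call handled bio %d\n", bio);
	return 0;
}

/* Stand-in for a legacy driver's make_request function. */
static int legacy_make_request(struct queue *q, int bio)
{
	(void)q;
	printf("indirect call handled bio %d\n", bio);
	return 0;
}

static int submit(struct queue *q, int bio)
{
	if (q->make_request_fn)
		return q->make_request_fn(q, bio);	/* legacy path */
	return mq_make_request(q, bio);			/* blk-mq fast path */
}

int main(void)
{
	struct queue mq_queue = { .make_request_fn = NULL };
	struct queue legacy_queue = { .make_request_fn = legacy_make_request };

	submit(&mq_queue, 1);
	submit(&legacy_queue, 2);
	return 0;
}

[In the patch below, generic_make_request() grows exactly this branch, while
direct_make_request() drops the pointer entirely and warns if one is set.]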
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--	block/blk-core.c	26
1 files changed, 17 insertions, 9 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 38e984d95e84..dffff2100888 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1073,7 +1073,10 @@ blk_qc_t generic_make_request(struct bio *bio)
/* Create a fresh bio_list for all subordinate requests */
bio_list_on_stack[1] = bio_list_on_stack[0];
bio_list_init(&bio_list_on_stack[0]);
- ret = q->make_request_fn(q, bio);
+ if (q->make_request_fn)
+ ret = q->make_request_fn(q, bio);
+ else
+ ret = blk_mq_make_request(q, bio);
blk_queue_exit(q);
@@ -1113,9 +1116,7 @@ EXPORT_SYMBOL(generic_make_request);
*
* This function behaves like generic_make_request(), but does not protect
* against recursion. Must only be used if the called driver is known
- * to not call generic_make_request (or direct_make_request) again from
- * its make_request function. (Calling direct_make_request again from
- * a workqueue is perfectly fine as that doesn't recurse).
+ * to be blk-mq based.
*/
blk_qc_t direct_make_request(struct bio *bio)
{
@@ -1123,20 +1124,27 @@ blk_qc_t direct_make_request(struct bio *bio)
struct request_queue *q = bio->bi_disk->queue;
bool nowait = bio->bi_opf & REQ_NOWAIT;
blk_qc_t ret;
+ if (WARN_ON_ONCE(q->make_request_fn))
+ goto io_error;
if (!generic_make_request_checks(bio))
return BLK_QC_T_NONE;
if (unlikely(blk_queue_enter(q, nowait ? BLK_MQ_REQ_NOWAIT : 0))) {
if (nowait && !blk_queue_dying(q))
- bio_wouldblock_error(bio);
- else
- bio_io_error(bio);
- return BLK_QC_T_NONE;
+ goto would_block;
+ goto io_error;
}
- ret = q->make_request_fn(q, bio);
+ ret = blk_mq_make_request(q, bio);
blk_queue_exit(q);
return ret;
+
+would_block:
+ bio_wouldblock_error(bio);
+ return BLK_QC_T_NONE;
+io_error:
+ bio_io_error(bio);
+ return BLK_QC_T_NONE;
}
EXPORT_SYMBOL_GPL(direct_make_request);