author	Bart Van Assche <bart.vanassche@wdc.com>	2018-04-10 17:02:40 -0600
committer	Jens Axboe <axboe@kernel.dk>	2018-04-10 17:46:40 -0600
commit	37f9579f4c31a6d698dbf3016d7bf132f9288d30 (patch)
tree	3b3add4672c9e0391bead53a764718e92d131983 /block
parent	a93f00b3762026dd8231f473fae9346bda07db03 (diff)
blk-mq: Avoid that submitting a bio concurrently with device removal triggers a crash
Because blkcg_exit_queue() is now called from inside blk_cleanup_queue(), it is no longer safe to access cgroup information during or after the blk_cleanup_queue() call. Hence protect the generic_make_request_checks() call with blk_queue_enter() / blk_queue_exit().

Reported-by: Ming Lei <ming.lei@redhat.com>
Fixes: a063057d7c73 ("block: Fix a race between request queue removal and the block cgroup controller")
Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Cc: Ming Lei <ming.lei@redhat.com>
Cc: Joseph Qi <joseph.qi@linux.alibaba.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
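As background for the diff below, here is a minimal userspace model of the blk_queue_enter() / blk_queue_exit() gate that the commit message refers to: entering takes a usage reference unless the queue is already being torn down (or entering would block while REQ_NOWAIT is set), and exiting drops it. The struct fields and function names here (queue_model, queue_enter, queue_exit) are simplified stand-ins for illustration, not the kernel implementation.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for struct request_queue: a usage count plus a dying flag. */
struct queue_model {
	int usage;	/* references held by in-flight submitters */
	bool dying;	/* set once teardown (blk_cleanup_queue) has begun */
};

/* Models blk_queue_enter(): fail instead of blocking once the queue is dying. */
static int queue_enter(struct queue_model *q)
{
	if (q->dying)
		return -1;	/* the submitter must error the bio, not touch q */
	q->usage++;
	return 0;
}

/* Models blk_queue_exit(): drop the reference taken by queue_enter(). */
static void queue_exit(struct queue_model *q)
{
	q->usage--;
}

int main(void)
{
	struct queue_model q = { .usage = 0, .dying = false };

	/* The patch brackets generic_make_request_checks() with this pair,
	 * so cgroup data freed by blkcg_exit_queue() cannot be reached
	 * once teardown has started. */
	if (queue_enter(&q) == 0) {
		printf("checks may safely dereference queue/cgroup state\n");
		queue_exit(&q);
	}

	q.dying = true;
	if (queue_enter(&q) < 0)
		printf("queue is being removed: bio is failed instead\n");

	return 0;
}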
Diffstat (limited to 'block')
-rw-r--r--	block/blk-core.c	35
1 file changed, 29 insertions(+), 6 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index abcb8684ba67..806ce2442819 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2385,8 +2385,20 @@ blk_qc_t generic_make_request(struct bio *bio)
 	 * yet.
 	 */
 	struct bio_list bio_list_on_stack[2];
+	blk_mq_req_flags_t flags = 0;
+	struct request_queue *q = bio->bi_disk->queue;
 	blk_qc_t ret = BLK_QC_T_NONE;
 
+	if (bio->bi_opf & REQ_NOWAIT)
+		flags = BLK_MQ_REQ_NOWAIT;
+	if (blk_queue_enter(q, flags) < 0) {
+		if (!blk_queue_dying(q) && (bio->bi_opf & REQ_NOWAIT))
+			bio_wouldblock_error(bio);
+		else
+			bio_io_error(bio);
+		return ret;
+	}
+
 	if (!generic_make_request_checks(bio))
 		goto out;
 
@@ -2423,11 +2435,22 @@ blk_qc_t generic_make_request(struct bio *bio)
 	bio_list_init(&bio_list_on_stack[0]);
 	current->bio_list = bio_list_on_stack;
 	do {
-		struct request_queue *q = bio->bi_disk->queue;
-		blk_mq_req_flags_t flags = bio->bi_opf & REQ_NOWAIT ?
-			BLK_MQ_REQ_NOWAIT : 0;
+		bool enter_succeeded = true;
+
+		if (unlikely(q != bio->bi_disk->queue)) {
+			if (q)
+				blk_queue_exit(q);
+			q = bio->bi_disk->queue;
+			flags = 0;
+			if (bio->bi_opf & REQ_NOWAIT)
+				flags = BLK_MQ_REQ_NOWAIT;
+			if (blk_queue_enter(q, flags) < 0) {
+				enter_succeeded = false;
+				q = NULL;
+			}
+		}
 
-		if (likely(blk_queue_enter(q, flags) == 0)) {
+		if (enter_succeeded) {
 			struct bio_list lower, same;
 
 			/* Create a fresh bio_list for all subordinate requests */
@@ -2435,8 +2458,6 @@ blk_qc_t generic_make_request(struct bio *bio)
 			bio_list_init(&bio_list_on_stack[0]);
 			ret = q->make_request_fn(q, bio);
 
-			blk_queue_exit(q);
-
 			/* sort new bios into those for a lower level
 			 * and those for the same level
 			 */
@@ -2463,6 +2484,8 @@ blk_qc_t generic_make_request(struct bio *bio)
 	current->bio_list = NULL; /* deactivate */
 
 out:
+	if (q)
+		blk_queue_exit(q);
 	return ret;
 }
 EXPORT_SYMBOL(generic_make_request);
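For readers unfamiliar with the loop in the second hunk: after this patch, generic_make_request() keeps one reference on whichever queue the current bio targets, re-enters only when a stacked driver has redirected bios to a different queue (exiting the previous queue first), and drops the final reference at the out: label. A rough, self-contained model of that bookkeeping (plain C with made-up q_enter()/q_exit() helpers; not kernel code) might look like this:

#include <stddef.h>
#include <stdio.h>

struct q { const char *name; int usage; };	/* stand-in for struct request_queue */

static int  q_enter(struct q *q) { q->usage++; return 0; }	/* models blk_queue_enter() succeeding */
static void q_exit(struct q *q)  { q->usage--; }		/* models blk_queue_exit() */

int main(void)
{
	struct q top = { "md0", 0 }, lower = { "sda", 0 };
	/* Order in which bios pop off bio_list_on_stack: first the top-level
	 * device, then two bios a stacked driver redirected to a lower queue. */
	struct q *bio_queue[] = { &top, &lower, &lower };
	struct q *cur = bio_queue[0];

	q_enter(cur);				/* taken before generic_make_request_checks() */

	for (size_t i = 0; i < 3; i++) {
		if (cur != bio_queue[i]) {	/* bio targets a different queue */
			q_exit(cur);		/* release the previous queue ... */
			cur = bio_queue[i];
			q_enter(cur);		/* ... and pin the new one */
		}
		printf("dispatch to %s (usage=%d)\n", cur->name, cur->usage);
	}

	q_exit(cur);				/* the final exit at the out: label */
	printf("md0 usage=%d, sda usage=%d\n", top.usage, lower.usage);
	return 0;
}

Both counters end at zero, mirroring how every successful enter in the patched function is paired with exactly one exit, whether the bio chain stays on one queue or crosses into a lower one.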