author    Jan Kara <jack@suse.cz>    2021-06-23 11:36:34 +0200
committer Jens Axboe <axboe@kernel.dk>    2021-06-24 18:43:55 -0600
commit    fd2ef39cc9a6b9c4c41864ac506906c52f94b06a (patch)
tree      0d9ad0660f5947bccc862825bcdaf9f3bfd20789 /block
parent    a921c655f2033dd1ce1379128efe881dda23ea37 (diff)
blk: Fix lock inversion between ioc lock and bfqd lock
Lockdep complains about lock inversion between ioc->lock and bfqd->lock:

bfqd -> ioc:
 put_io_context+0x33/0x90                 -> ioc->lock grabbed
 blk_mq_free_request+0x51/0x140
 blk_put_request+0xe/0x10
 blk_attempt_req_merge+0x1d/0x30
 elv_attempt_insert_merge+0x56/0xa0
 blk_mq_sched_try_insert_merge+0x4b/0x60
 bfq_insert_requests+0x9e/0x18c0          -> bfqd->lock grabbed
 blk_mq_sched_insert_requests+0xd6/0x2b0
 blk_mq_flush_plug_list+0x154/0x280
 blk_finish_plug+0x40/0x60
 ext4_writepages+0x696/0x1320
 do_writepages+0x1c/0x80
 __filemap_fdatawrite_range+0xd7/0x120
 sync_file_range+0xac/0xf0

ioc -> bfqd:
 bfq_exit_icq+0xa3/0xe0                   -> bfqd->lock grabbed
 put_io_context_active+0x78/0xb0          -> ioc->lock grabbed
 exit_io_context+0x48/0x50
 do_exit+0x7e9/0xdd0
 do_group_exit+0x54/0xc0

To avoid this inversion we change blk_mq_sched_try_insert_merge() to not
free the merged request but rather leave that up to the caller, similarly
to blk_mq_sched_try_merge(). And in bfq_insert_request() we make sure to
free all the merged requests only after dropping bfqd->lock.

Fixes: aee69d78dec0 ("block, bfq: introduce the BFQ-v0 I/O scheduler as an extra scheduler")
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Acked-by: Paolo Valente <paolo.valente@linaro.org>
Signed-off-by: Jan Kara <jack@suse.cz>
Link: https://lore.kernel.org/r/20210623093634.27879-3-jack@suse.cz
Signed-off-by: Jens Axboe <axboe@kernel.dk>
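The deferred-free discipline used by this patch can be illustrated outside the kernel. Below is a minimal userspace sketch of the idea (all names are made up for illustration; pthread mutexes stand in for the spinlocks and a hand-rolled singly linked list stands in for LIST_HEAD — this is not the kernel code): a destructor that needs lock B is never invoked while lock A is held; victims are instead parked on a caller-local list and freed only after A is dropped, so the A -> B ordering never materializes opposite the existing B -> A path.

/*
 * Deferred-free sketch (illustrative only; hypothetical names).
 * Build with: cc -o sketch sketch.c -lpthread
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct request {
	struct request *next;	/* stand-in for rq->queuelist */
	int id;
};

static pthread_mutex_t sched_lock = PTHREAD_MUTEX_INITIALIZER; /* "bfqd->lock" */
static pthread_mutex_t ioc_lock   = PTHREAD_MUTEX_INITIALIZER; /* "ioc->lock" */

/* Freeing takes ioc_lock, just as blk_mq_free_request() may take ioc->lock. */
static void free_request(struct request *rq)
{
	pthread_mutex_lock(&ioc_lock);
	printf("freed request %d\n", rq->id);
	pthread_mutex_unlock(&ioc_lock);
	free(rq);
}

/*
 * On a successful merge, park the absorbed request on 'free_list'
 * instead of freeing it while sched_lock is held.
 */
static int try_insert_merge(struct request *rq, struct request **free_list)
{
	int merged = (rq->id % 2 == 0);	/* toy merge criterion */

	if (merged) {
		rq->next = *free_list;
		*free_list = rq;
	}
	return merged;
}

static void insert_request(struct request *rq)
{
	struct request *free_list = NULL;

	pthread_mutex_lock(&sched_lock);
	if (try_insert_merge(rq, &free_list)) {
		pthread_mutex_unlock(&sched_lock);
		/* Safe: ioc_lock is only taken after sched_lock is dropped. */
		while (free_list) {
			struct request *next = free_list->next;

			free_request(free_list);
			free_list = next;
		}
		return;
	}
	/* ...normal insertion would happen here... */
	pthread_mutex_unlock(&sched_lock);
}

int main(void)
{
	struct request *rq = calloc(1, sizeof(*rq));

	rq->id = 2;
	insert_request(rq);
	return 0;
}

This is the same shape as the change below: bfq_insert_request() and dd_insert_request() pass a local list to blk_mq_sched_try_insert_merge() and call blk_mq_free_requests() on it only after releasing their scheduler lock.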
Diffstat (limited to 'block')
-rw-r--r--   block/bfq-iosched.c        6
-rw-r--r--   block/blk-merge.c         19
-rw-r--r--   block/blk-mq-sched.c       5
-rw-r--r--   block/blk-mq-sched.h       3
-rw-r--r--   block/blk-mq.h            11
-rw-r--r--   block/blk.h                2
-rw-r--r--   block/elevator.c          11
-rw-r--r--   block/mq-deadline-main.c   5
8 files changed, 41 insertions(+), 21 deletions(-)
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 9433d38e486c..727955918563 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -2345,9 +2345,9 @@ static bool bfq_bio_merge(struct request_queue *q, struct bio *bio,
 	ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
+	spin_unlock_irq(&bfqd->lock);
 	if (free)
 		blk_mq_free_request(free);
-	spin_unlock_irq(&bfqd->lock);
 	return ret;
 }
@@ -5969,14 +5969,16 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 	struct bfq_queue *bfqq;
 	bool idle_timer_disabled = false;
 	unsigned int cmd_flags;
+	LIST_HEAD(free);
 #ifdef CONFIG_BFQ_GROUP_IOSCHED
 	if (!cgroup_subsys_on_dfl(io_cgrp_subsys) && rq->bio)
 		bfqg_stats_update_legacy_io(q, rq);
 #endif
 	spin_lock_irq(&bfqd->lock);
-	if (blk_mq_sched_try_insert_merge(q, rq)) {
+	if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
 		spin_unlock_irq(&bfqd->lock);
+		blk_mq_free_requests(&free);
 		return;
 	}
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 4d97fb6dd226..1398b52a24b4 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -846,18 +846,15 @@ static struct request *attempt_front_merge(struct request_queue *q,
 	return NULL;
 }
-int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
-			  struct request *next)
+/*
+ * Try to merge 'next' into 'rq'. Return true if the merge happened, false
+ * otherwise. The caller is responsible for freeing 'next' if the merge
+ * happened.
+ */
+bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
+			   struct request *next)
 {
-	struct request *free;
-
-	free = attempt_merge(q, rq, next);
-	if (free) {
-		blk_put_request(free);
-		return 1;
-	}
-
-	return 0;
+	return attempt_merge(q, rq, next);
 }
 bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 2403a5c2b053..c838d81ac058 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -399,9 +399,10 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
 	return ret;
 }
-bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq)
+bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
+				   struct list_head *free)
 {
-	return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq);
+	return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq, free);
 }
 EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index aff037cfd8e7..5246ae040704 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -13,7 +13,8 @@ bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
 		unsigned int nr_segs, struct request **merged_request);
 bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
 		unsigned int nr_segs);
-bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
+bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
+				   struct list_head *free);
 void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx);
 void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 4b1ca7b7bbeb..d08779f77a26 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -302,6 +302,17 @@ static inline struct blk_plug *blk_mq_plug(struct request_queue *q,
 	return NULL;
 }
+/* Free all requests on the list */
+static inline void blk_mq_free_requests(struct list_head *list)
+{
+	while (!list_empty(list)) {
+		struct request *rq = list_entry_rq(list->next);
+
+		list_del_init(&rq->queuelist);
+		blk_mq_free_request(rq);
+	}
+}
+
 /*
  * For shared tag users, we track the number of currently active users
  * and attempt to provide a fair share of the tag depth for each of them.
diff --git a/block/blk.h b/block/blk.h
index 4fcd7a032377..4b885c0f6708 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -224,7 +224,7 @@ ssize_t part_timeout_store(struct device *, struct device_attribute *,
 void __blk_queue_split(struct bio **bio, unsigned int *nr_segs);
 int ll_back_merge_fn(struct request *req, struct bio *bio,
 		unsigned int nr_segs);
-int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
+bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
 				struct request *next);
 unsigned int blk_recalc_rq_segments(struct request *rq);
 void blk_rq_set_mixed_merge(struct request *rq);
diff --git a/block/elevator.c b/block/elevator.c
index 85d0d4adbb64..52ada14cfe45 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -350,9 +350,11 @@ enum elv_merge elv_merge(struct request_queue *q, struct request **req,
  * we can append 'rq' to an existing request, so we can throw 'rq' away
  * afterwards.
  *
- * Returns true if we merged, false otherwise
+ * Returns true if we merged, false otherwise. 'free' will contain all
+ * requests that need to be freed.
  */
-bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq)
+bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq,
+			      struct list_head *free)
 {
 	struct request *__rq;
 	bool ret;
@@ -363,8 +365,10 @@ bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq)
 	/*
 	 * First try one-hit cache.
 	 */
-	if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
+	if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq)) {
+		list_add(&rq->queuelist, free);
 		return true;
+	}
 	if (blk_queue_noxmerges(q))
 		return false;
@@ -378,6 +382,7 @@ bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq)
 		if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
 			break;
+		list_add(&rq->queuelist, free);
 		/* The merged request could be merged with others, try again */
 		ret = true;
 		rq = __rq;
diff --git a/block/mq-deadline-main.c b/block/mq-deadline-main.c
index 4815e536091f..9db6da9ef4c6 100644
--- a/block/mq-deadline-main.c
+++ b/block/mq-deadline-main.c
@@ -719,6 +719,7 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 	struct dd_per_prio *per_prio;
 	enum dd_prio prio;
 	struct dd_blkcg *blkcg;
+	LIST_HEAD(free);
 	lockdep_assert_held(&dd->lock);
@@ -742,8 +743,10 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 	WARN_ON_ONCE(rq->elv.priv[0]);
 	rq->elv.priv[0] = blkcg;
-	if (blk_mq_sched_try_insert_merge(q, rq))
+	if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
+		blk_mq_free_requests(&free);
 		return;
+	}
 	trace_block_rq_insert(rq);