author    Ming Lei <ming.lei@redhat.com>  2021-05-11 23:22:35 +0800
committer Jens Axboe <axboe@kernel.dk>    2021-05-24 06:47:22 -0600
commit    bd63141d585bef14f4caf111f6d0e27fe2300ec6
tree      18c509a0a1fb0b90746396b360a5b1e5b46bae37  /block/blk-mq-tag.c
parent    2e315dc07df009c3e29d6926871f62a30cfae394
blk-mq: clear stale request in tags->rq[] before freeing one request pool
refcount_inc_not_zero() in bt_tags_iter() may still read a freed request. Fix the issue with the following approach:

1) hold a per-tags spinlock when reading ->rqs[tag] and calling refcount_inc_not_zero() in bt_tags_iter()

2) clear stale requests referenced via ->rqs[tag] before freeing the request pool, holding the per-tags spinlock while clearing the stale ->rqs[tag] entries

After the stale requests are cleared, bt_tags_iter() can no longer observe a freed request, and the clearing also waits for pending request references.

The idea of clearing ->rqs[] is borrowed from John Garry's previous patch and one recent patch from David.

Tested-by: John Garry <john.garry@huawei.com>
Reviewed-by: David Jeffery <djeffery@redhat.com>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20210511152236.763464-4-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/blk-mq-tag.c')
-rw-r--r--  block/blk-mq-tag.c  9
1 file changed, 7 insertions(+), 2 deletions(-)
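The hunks below cover only the lookup side in block/blk-mq-tag.c plus the lock initialization; the clearing side described in point 2 of the commit message lands elsewhere in the same series. As a rough, non-authoritative sketch of what that clearing step amounts to (the helper name clear_stale_rqs() and the pool-bounds parameters are illustrative assumptions, not code from this patch):

/*
 * Illustrative sketch only, not code from this patch: drop ->rqs[] entries
 * that still point into a request pool which is about to be freed.  Holding
 * tags->lock here serializes against readers in blk_mq_find_and_get_req(),
 * so iterators cannot observe a freed request once this returns.
 */
static void clear_stale_rqs(struct blk_mq_tags *tags,
			    struct request *pool_start,
			    struct request *pool_end)
{
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&tags->lock, flags);
	for (i = 0; i < tags->nr_tags; i++) {
		struct request *rq = tags->rqs[i];

		/* only clear entries that refer to the pool being freed */
		if (rq && rq >= pool_start && rq < pool_end)
			tags->rqs[i] = NULL;
	}
	spin_unlock_irqrestore(&tags->lock, flags);
}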
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 544edf2c56a5..1671dae43030 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -202,10 +202,14 @@ struct bt_iter_data {
 static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
 		unsigned int bitnr)
 {
-	struct request *rq = tags->rqs[bitnr];
+	struct request *rq;
+	unsigned long flags;
 
+	spin_lock_irqsave(&tags->lock, flags);
+	rq = tags->rqs[bitnr];
 	if (!rq || !refcount_inc_not_zero(&rq->ref))
-		return NULL;
+		rq = NULL;
+	spin_unlock_irqrestore(&tags->lock, flags);
 	return rq;
 }
@@ -538,6 +542,7 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
 	tags->nr_tags = total_tags;
 	tags->nr_reserved_tags = reserved_tags;
+	spin_lock_init(&tags->lock);
 
 	if (blk_mq_is_sbitmap_shared(flags))
 		return tags;
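For context, a tag iterator such as bt_tags_iter() is expected to pair the locked lookup above with a reference drop once its callback has run. A minimal usage sketch, assuming the blk_mq_put_rq_ref() helper introduced elsewhere in this series (the wrapper name example_iter_one_tag() is purely illustrative):

/*
 * Usage sketch, not code from this patch: look up the request under
 * tags->lock with an elevated reference, run the callback, then drop
 * the reference so the request pool can be torn down safely.
 */
static bool example_iter_one_tag(struct blk_mq_tags *tags, unsigned int bitnr,
				 busy_tag_iter_fn *fn, void *data)
{
	struct request *rq;
	bool ret = true;

	/* returns NULL if the tag is unused or the request is going away */
	rq = blk_mq_find_and_get_req(tags, bitnr);
	if (!rq)
		return true;

	ret = fn(rq, data, true);

	/* drop the reference taken by blk_mq_find_and_get_req() */
	blk_mq_put_rq_ref(rq);
	return ret;
}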