author	Ming Lei <ming.lei@redhat.com>	2018-12-17 08:44:05 -0700
committer	Jens Axboe <axboe@kernel.dk>	2018-12-17 11:19:54 -0700
commit	c16d6b5a9f47d0e581882269fca1d73be60208b2 (patch)
tree	778a7ca836078f6cca15630f25b0d652320f5cb8	/block/blk-mq.c
parent	7211aef86f79583e59b88a0aba0bc830566f7e8e (diff)
blk-mq: fix dispatch from sw queue
When a request is added to the rq list of a sw queue (ctx), the rq may belong to a different type of hctx, especially after multiple queue mappings were introduced. So when dispatching requests from a sw queue via blk_mq_flush_busy_ctxs() or blk_mq_dequeue_from_ctx(), a request belonging to another hctx queue type can be dispatched to the current hctx when a read queue or poll queue is enabled.

This patch fixes the issue by introducing per-queue-type lists.

Cc: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lei <ming.lei@redhat.com>

Changed by me to not use separately cacheline-aligned lists; just place them all in the same cacheline where we previously had the one list and lock.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
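The following is a minimal userspace sketch, not kernel code, of the idea behind the patch: each software queue keeps one request list per hctx type (indexed the same way the diff below indexes ctx->rq_lists[hctx->type]), so a poll or read hctx only ever dispatches requests queued for its own type. The names sw_ctx, req, ctx_insert and ctx_flush are invented for illustration; only the enum hctx_type values mirror the ones used in the diff.

/* Build with: cc -Wall sketch.c -o sketch */
#include <stdio.h>

enum hctx_type {		/* same per-type indexing as in the patch */
	HCTX_TYPE_DEFAULT,
	HCTX_TYPE_READ,
	HCTX_TYPE_POLL,
	HCTX_MAX_TYPES,
};

struct req {
	int tag;
	struct req *next;
};

struct sw_ctx {
	/* one list per hctx type, standing in for ctx->rq_lists[] */
	struct req *rq_lists[HCTX_MAX_TYPES];
};

/* add a request at the tail of the list matching its hctx type */
static void ctx_insert(struct sw_ctx *ctx, struct req *rq, enum hctx_type type)
{
	struct req **pos = &ctx->rq_lists[type];

	while (*pos)
		pos = &(*pos)->next;
	rq->next = NULL;
	*pos = rq;
}

/* flush only the list belonging to the dispatching hctx's type */
static void ctx_flush(struct sw_ctx *ctx, enum hctx_type type)
{
	struct req *rq = ctx->rq_lists[type];

	ctx->rq_lists[type] = NULL;
	for (; rq; rq = rq->next)
		printf("dispatch tag %d from type %d list\n", rq->tag, type);
}

int main(void)
{
	struct sw_ctx ctx = { { NULL } };
	struct req a = { .tag = 1 }, b = { .tag = 2 };

	ctx_insert(&ctx, &a, HCTX_TYPE_DEFAULT);
	ctx_insert(&ctx, &b, HCTX_TYPE_POLL);

	/* a poll hctx now only sees request 'b', never 'a' */
	ctx_flush(&ctx, HCTX_TYPE_POLL);
	return 0;
}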
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--	block/blk-mq.c	29
1 file changed, 19 insertions(+), 10 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 313f28b2d079..9c1c1544bac3 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -958,9 +958,10 @@ static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
struct flush_busy_ctx_data *flush_data = data;
struct blk_mq_hw_ctx *hctx = flush_data->hctx;
struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
+ enum hctx_type type = hctx->type;
spin_lock(&ctx->lock);
- list_splice_tail_init(&ctx->rq_list, flush_data->list);
+ list_splice_tail_init(&ctx->rq_lists[type], flush_data->list);
sbitmap_clear_bit(sb, bitnr);
spin_unlock(&ctx->lock);
return true;
@@ -992,12 +993,13 @@ static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
struct dispatch_rq_data *dispatch_data = data;
struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;
struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
+ enum hctx_type type = hctx->type;
spin_lock(&ctx->lock);
- if (!list_empty(&ctx->rq_list)) {
- dispatch_data->rq = list_entry_rq(ctx->rq_list.next);
+ if (!list_empty(&ctx->rq_lists[type])) {
+ dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next);
list_del_init(&dispatch_data->rq->queuelist);
- if (list_empty(&ctx->rq_list))
+ if (list_empty(&ctx->rq_lists[type]))
sbitmap_clear_bit(sb, bitnr);
}
spin_unlock(&ctx->lock);
@@ -1608,15 +1610,16 @@ static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
bool at_head)
{
struct blk_mq_ctx *ctx = rq->mq_ctx;
+ enum hctx_type type = hctx->type;
lockdep_assert_held(&ctx->lock);
trace_block_rq_insert(hctx->queue, rq);
if (at_head)
- list_add(&rq->queuelist, &ctx->rq_list);
+ list_add(&rq->queuelist, &ctx->rq_lists[type]);
else
- list_add_tail(&rq->queuelist, &ctx->rq_list);
+ list_add_tail(&rq->queuelist, &ctx->rq_lists[type]);
}
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
@@ -1651,6 +1654,7 @@ void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
{
struct request *rq;
+ enum hctx_type type = hctx->type;
/*
* preemption doesn't flush plug list, so it's possible ctx->cpu is
@@ -1662,7 +1666,7 @@ void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
}
spin_lock(&ctx->lock);
- list_splice_tail_init(list, &ctx->rq_list);
+ list_splice_tail_init(list, &ctx->rq_lists[type]);
blk_mq_hctx_mark_pending(hctx, ctx);
spin_unlock(&ctx->lock);
}
@@ -2200,13 +2204,15 @@ static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
struct blk_mq_hw_ctx *hctx;
struct blk_mq_ctx *ctx;
LIST_HEAD(tmp);
+ enum hctx_type type;
hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
ctx = __blk_mq_get_ctx(hctx->queue, cpu);
+ type = hctx->type;
spin_lock(&ctx->lock);
- if (!list_empty(&ctx->rq_list)) {
- list_splice_init(&ctx->rq_list, &tmp);
+ if (!list_empty(&ctx->rq_lists[type])) {
+ list_splice_init(&ctx->rq_lists[type], &tmp);
blk_mq_hctx_clear_pending(hctx, ctx);
}
spin_unlock(&ctx->lock);
@@ -2343,10 +2349,13 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
for_each_possible_cpu(i) {
struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
struct blk_mq_hw_ctx *hctx;
+ int k;
__ctx->cpu = i;
spin_lock_init(&__ctx->lock);
- INIT_LIST_HEAD(&__ctx->rq_list);
+ for (k = HCTX_TYPE_DEFAULT; k < HCTX_MAX_TYPES; k++)
+ INIT_LIST_HEAD(&__ctx->rq_lists[k]);
+
__ctx->queue = q;
/*