author     Christoph Hellwig <hch@lst.de>   2016-09-14 16:18:54 +0200
committer  Jens Axboe <axboe@fb.com>        2016-09-15 08:42:03 -0600
commit     7d7e0f90b70f6c5367c2d1c9a7e87dd228bd0816 (patch)
tree       6105df4466a36e85a24ead3b67a35d31f9f37011 /block
parent     bdd17e75cd97c5c39feee409890a91d0396640fe (diff)
blk-mq: remove ->map_queue
All drivers use the default, so provide an inline version of it. If we ever need a different queue mapping we can add an optional method back, although supporting that will also require major changes to the queue setup code. This provides better code generation, and better debuggability as well.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
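The driver half of this cleanup falls outside the block/-only diffstat below: each driver simply drops its boilerplate .map_queue assignment. A minimal sketch of that side, assuming a hypothetical driver "mydrv" and the 4.8-era struct blk_mq_ops (the mydrv_* names are illustrative, not taken from the patch):

#include <linux/blk-mq.h>

/* Stub ->queue_rq() with the 4.8-era signature, just to make the sketch
 * self-contained; a real driver starts the request here. */
static int mydrv_queue_rq(struct blk_mq_hw_ctx *hctx,
			  const struct blk_mq_queue_data *bd)
{
	return BLK_MQ_RQ_QUEUE_OK;
}

static struct blk_mq_ops mydrv_mq_ops = {
	.queue_rq	= mydrv_queue_rq,
	/*
	 * Before this patch every driver also carried:
	 *	.map_queue	= blk_mq_map_queue,
	 * That line goes away; the core now resolves ctx -> hctx itself via
	 * the new static inline in block/blk-mq.h.
	 */
};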
Diffstat (limited to 'block')
-rw-r--r--  block/blk-flush.c   |  6
-rw-r--r--  block/blk-mq-tag.c  |  5
-rw-r--r--  block/blk-mq.c      | 40
-rw-r--r--  block/blk-mq.h      |  6
-rw-r--r--  block/blk.h         | 11
5 files changed, 25 insertions, 43 deletions
diff --git a/block/blk-flush.c b/block/blk-flush.c
index d308def812db..6a14b68b9135 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -232,7 +232,7 @@ static void flush_end_io(struct request *flush_rq, int error)
/* release the tag's ownership to the req cloned from */
spin_lock_irqsave(&fq->mq_flush_lock, flags);
- hctx = q->mq_ops->map_queue(q, flush_rq->mq_ctx->cpu);
+ hctx = blk_mq_map_queue(q, flush_rq->mq_ctx->cpu);
blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
flush_rq->tag = -1;
}
@@ -325,7 +325,7 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
flush_rq->tag = first_rq->tag;
fq->orig_rq = first_rq;
- hctx = q->mq_ops->map_queue(q, first_rq->mq_ctx->cpu);
+ hctx = blk_mq_map_queue(q, first_rq->mq_ctx->cpu);
blk_mq_tag_set_rq(hctx, first_rq->tag, flush_rq);
}
@@ -358,7 +358,7 @@ static void mq_flush_data_end_io(struct request *rq, int error)
unsigned long flags;
struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);
- hctx = q->mq_ops->map_queue(q, ctx->cpu);
+ hctx = blk_mq_map_queue(q, ctx->cpu);
/*
* After populating an empty queue, kick it to avoid stall. Read
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 729bac3a673b..16028130289f 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -301,8 +301,7 @@ static int bt_get(struct blk_mq_alloc_data *data,
io_schedule();
data->ctx = blk_mq_get_ctx(data->q);
- data->hctx = data->q->mq_ops->map_queue(data->q,
- data->ctx->cpu);
+ data->hctx = blk_mq_map_queue(data->q, data->ctx->cpu);
if (data->flags & BLK_MQ_REQ_RESERVED) {
bt = &data->hctx->tags->breserved_tags;
} else {
@@ -726,7 +725,7 @@ u32 blk_mq_unique_tag(struct request *rq)
int hwq = 0;
if (q->mq_ops) {
- hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
+ hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
hwq = hctx->queue_num;
}
diff --git a/block/blk-mq.c b/block/blk-mq.c
index c9499f118ef6..6e077a9d61a8 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -245,7 +245,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
return ERR_PTR(ret);
ctx = blk_mq_get_ctx(q);
- hctx = q->mq_ops->map_queue(q, ctx->cpu);
+ hctx = blk_mq_map_queue(q, ctx->cpu);
blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
@@ -254,7 +254,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
blk_mq_put_ctx(ctx);
ctx = blk_mq_get_ctx(q);
- hctx = q->mq_ops->map_queue(q, ctx->cpu);
+ hctx = blk_mq_map_queue(q, ctx->cpu);
blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
ctx = alloc_data.ctx;
@@ -338,11 +338,7 @@ EXPORT_SYMBOL_GPL(blk_mq_free_hctx_request);
void blk_mq_free_request(struct request *rq)
{
- struct blk_mq_hw_ctx *hctx;
- struct request_queue *q = rq->q;
-
- hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
- blk_mq_free_hctx_request(hctx, rq);
+ blk_mq_free_hctx_request(blk_mq_map_queue(rq->q, rq->mq_ctx->cpu), rq);
}
EXPORT_SYMBOL_GPL(blk_mq_free_request);
@@ -1074,9 +1070,7 @@ void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
{
struct blk_mq_ctx *ctx = rq->mq_ctx;
struct request_queue *q = rq->q;
- struct blk_mq_hw_ctx *hctx;
-
- hctx = q->mq_ops->map_queue(q, ctx->cpu);
+ struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
spin_lock(&ctx->lock);
__blk_mq_insert_request(hctx, rq, at_head);
@@ -1093,12 +1087,10 @@ static void blk_mq_insert_requests(struct request_queue *q,
bool from_schedule)
{
- struct blk_mq_hw_ctx *hctx;
+ struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
trace_block_unplug(q, depth, !from_schedule);
- hctx = q->mq_ops->map_queue(q, ctx->cpu);
-
/*
* preemption doesn't flush plug list, so it's possible ctx->cpu is
* offline now
@@ -1232,7 +1224,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,
blk_queue_enter_live(q);
ctx = blk_mq_get_ctx(q);
- hctx = q->mq_ops->map_queue(q, ctx->cpu);
+ hctx = blk_mq_map_queue(q, ctx->cpu);
if (rw_is_sync(bio_op(bio), bio->bi_opf))
op_flags |= REQ_SYNC;
@@ -1246,7 +1238,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,
trace_block_sleeprq(q, bio, op);
ctx = blk_mq_get_ctx(q);
- hctx = q->mq_ops->map_queue(q, ctx->cpu);
+ hctx = blk_mq_map_queue(q, ctx->cpu);
blk_mq_set_alloc_data(&alloc_data, q, 0, ctx, hctx);
rq = __blk_mq_alloc_request(&alloc_data, op, op_flags);
ctx = alloc_data.ctx;
@@ -1263,8 +1255,7 @@ static int blk_mq_direct_issue_request(struct request *rq, blk_qc_t *cookie)
{
int ret;
struct request_queue *q = rq->q;
- struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q,
- rq->mq_ctx->cpu);
+ struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
struct blk_mq_queue_data bd = {
.rq = rq,
.list = NULL,
@@ -1468,15 +1459,6 @@ run_queue:
return cookie;
}
-/*
- * Default mapping to a software queue, since we use one per CPU.
- */
-struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu)
-{
- return q->queue_hw_ctx[q->mq_map[cpu]];
-}
-EXPORT_SYMBOL(blk_mq_map_queue);
-
static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
struct blk_mq_tags *tags, unsigned int hctx_idx)
{
@@ -1810,7 +1792,7 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
if (!cpu_online(i))
continue;
- hctx = q->mq_ops->map_queue(q, i);
+ hctx = blk_mq_map_queue(q, i);
/*
* Set local node, IFF we have more than one hw queue. If
@@ -1848,7 +1830,7 @@ static void blk_mq_map_swqueue(struct request_queue *q,
continue;
ctx = per_cpu_ptr(q->queue_ctx, i);
- hctx = q->mq_ops->map_queue(q, i);
+ hctx = blk_mq_map_queue(q, i);
cpumask_set_cpu(i, hctx->cpumask);
ctx->index_hw = hctx->nr_ctx;
@@ -2313,7 +2295,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
return -EINVAL;
- if (!set->ops->queue_rq || !set->ops->map_queue)
+ if (!set->ops->queue_rq)
return -EINVAL;
if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 9087b11037b7..ec774bf4aea2 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -52,6 +52,12 @@ extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
const struct cpumask *online_mask);
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);
+static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
+ int cpu)
+{
+ return q->queue_hw_ctx[q->mq_map[cpu]];
+}
+
/*
* sysfs helpers
*/
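The "better code generation, and better debuggability" argument in the commit message comes down to this hunk: an indirect call through q->mq_ops becomes an open-coded array lookup that the compiler can inline. A rough sketch of a post-patch caller inside block/, using only fields that appear in this diff (the function name example_hwq_index is hypothetical):

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include "blk-mq.h"	/* private block/ header that now holds the inline helper */

static u32 example_hwq_index(struct request_queue *q, struct blk_mq_ctx *ctx)
{
	/* Old: hctx = q->mq_ops->map_queue(q, ctx->cpu);  -- indirect call.  */
	/* New: collapses to q->queue_hw_ctx[q->mq_map[ctx->cpu]], i.e. two   */
	/* dependent loads the optimizer can see through.                     */
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);

	return hctx->queue_num;	/* same pattern as blk_mq_unique_tag() above */
}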
diff --git a/block/blk.h b/block/blk.h
index c37492f5edaa..74444c49078f 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -39,14 +39,9 @@ extern struct ida blk_queue_ida;
static inline struct blk_flush_queue *blk_get_flush_queue(
struct request_queue *q, struct blk_mq_ctx *ctx)
{
- struct blk_mq_hw_ctx *hctx;
-
- if (!q->mq_ops)
- return q->fq;
-
- hctx = q->mq_ops->map_queue(q, ctx->cpu);
-
- return hctx->fq;
+ if (q->mq_ops)
+ return blk_mq_map_queue(q, ctx->cpu)->fq;
+ return q->fq;
}
static inline void __blk_get_queue(struct request_queue *q)