Diffstat (limited to 'block')
-rw-r--r--  block/blk-mq-sched.c  67
-rw-r--r--  block/blk-mq-sched.h   2
-rw-r--r--  block/blk-mq-tag.c    11
-rw-r--r--  block/blk-mq.c        13
4 files changed, 72 insertions, 21 deletions
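
In brief: the patch stops masking BLK_MQ_F_TAG_HCTX_SHARED out of the flags when allocating, freeing, and resizing scheduler tag maps, and instead backs every hctx's sched_tags with a single queue-wide sbitmap pair (q->sched_bitmap_tags / q->sched_breserved_tags). The predicate tested throughout the diff, blk_mq_is_sbitmap_shared(), is not part of this patch; in kernels of this vintage it is a one-line flag check in block/blk-mq.h, roughly:

	/* Helper assumed by this diff (lives in block/blk-mq.h, not shown here). */
	static inline bool blk_mq_is_sbitmap_shared(unsigned int flags)
	{
		return flags & BLK_MQ_F_TAG_HCTX_SHARED;
	}
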
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 996a4b2f73aa..045b6878b8c5 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -509,11 +509,9 @@ static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
struct blk_mq_hw_ctx *hctx,
unsigned int hctx_idx)
{
- unsigned int flags = set->flags & ~BLK_MQ_F_TAG_HCTX_SHARED;
-
if (hctx->sched_tags) {
blk_mq_free_rqs(set, hctx->sched_tags, hctx_idx);
- blk_mq_free_rq_map(hctx->sched_tags, flags);
+ blk_mq_free_rq_map(hctx->sched_tags, set->flags);
hctx->sched_tags = NULL;
}
}
@@ -523,12 +521,10 @@ static int blk_mq_sched_alloc_tags(struct request_queue *q,
unsigned int hctx_idx)
{
struct blk_mq_tag_set *set = q->tag_set;
- /* Clear HCTX_SHARED so tags are init'ed */
- unsigned int flags = set->flags & ~BLK_MQ_F_TAG_HCTX_SHARED;
int ret;
hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
- set->reserved_tags, flags);
+ set->reserved_tags, set->flags);
if (!hctx->sched_tags)
return -ENOMEM;
@@ -546,16 +542,50 @@ static void blk_mq_sched_tags_teardown(struct request_queue *q)
int i;
queue_for_each_hw_ctx(q, hctx, i) {
- /* Clear HCTX_SHARED so tags are freed */
- unsigned int flags = hctx->flags & ~BLK_MQ_F_TAG_HCTX_SHARED;
-
if (hctx->sched_tags) {
- blk_mq_free_rq_map(hctx->sched_tags, flags);
+ blk_mq_free_rq_map(hctx->sched_tags, hctx->flags);
hctx->sched_tags = NULL;
}
}
}
+static int blk_mq_init_sched_shared_sbitmap(struct request_queue *queue)
+{
+ struct blk_mq_tag_set *set = queue->tag_set;
+ int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags);
+ struct blk_mq_hw_ctx *hctx;
+ int ret, i;
+
+ /*
+ * Set initial depth at max so that we don't need to reallocate for
+ * updating nr_requests.
+ */
+ ret = blk_mq_init_bitmaps(&queue->sched_bitmap_tags,
+ &queue->sched_breserved_tags,
+ MAX_SCHED_RQ, set->reserved_tags,
+ set->numa_node, alloc_policy);
+ if (ret)
+ return ret;
+
+ queue_for_each_hw_ctx(queue, hctx, i) {
+ hctx->sched_tags->bitmap_tags =
+ &queue->sched_bitmap_tags;
+ hctx->sched_tags->breserved_tags =
+ &queue->sched_breserved_tags;
+ }
+
+ sbitmap_queue_resize(&queue->sched_bitmap_tags,
+ queue->nr_requests - set->reserved_tags);
+
+ return 0;
+}
+
+static void blk_mq_exit_sched_shared_sbitmap(struct request_queue *queue)
+{
+ sbitmap_queue_free(&queue->sched_bitmap_tags);
+ sbitmap_queue_free(&queue->sched_breserved_tags);
+}
+
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
{
struct blk_mq_hw_ctx *hctx;
@@ -580,12 +610,18 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
queue_for_each_hw_ctx(q, hctx, i) {
ret = blk_mq_sched_alloc_tags(q, hctx, i);
if (ret)
- goto err;
+ goto err_free_tags;
+ }
+
+ if (blk_mq_is_sbitmap_shared(q->tag_set->flags)) {
+ ret = blk_mq_init_sched_shared_sbitmap(q);
+ if (ret)
+ goto err_free_tags;
}
ret = e->ops.init_sched(q, e);
if (ret)
- goto err;
+ goto err_free_sbitmap;
blk_mq_debugfs_register_sched(q);
@@ -605,7 +641,10 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
return 0;
-err:
+err_free_sbitmap:
+ if (blk_mq_is_sbitmap_shared(q->tag_set->flags))
+ blk_mq_exit_sched_shared_sbitmap(q);
+err_free_tags:
blk_mq_sched_free_requests(q);
blk_mq_sched_tags_teardown(q);
q->elevator = NULL;
@@ -643,5 +682,7 @@ void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
if (e->type->ops.exit_sched)
e->type->ops.exit_sched(e);
blk_mq_sched_tags_teardown(q);
+ if (blk_mq_is_sbitmap_shared(q->tag_set->flags))
+ blk_mq_exit_sched_shared_sbitmap(q);
q->elevator = NULL;
}
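
The init/exit pair added above follows an allocate-at-max, resize-down pattern: the bitmaps are sized once at MAX_SCHED_RQ, so later nr_requests changes only need a cheap sbitmap_queue_resize() rather than a reallocation. Below is a minimal standalone sketch of that pattern using the generic sbitmap API directly instead of the blk_mq_init_bitmaps() wrapper; the demo_* names are illustrative, not kernel API:

	#include <linux/sbitmap.h>

	static struct sbitmap_queue demo_tags;	/* stand-in for queue->sched_bitmap_tags */

	static int demo_init(unsigned int max_depth, unsigned int active_depth, int node)
	{
		int ret;

		/* Allocate once at the maximum depth (-1 selects the default shift). */
		ret = sbitmap_queue_init_node(&demo_tags, max_depth, -1, false,
					      GFP_KERNEL, node);
		if (ret)
			return ret;

		/* Shrink the visible depth; growing it again later (up to
		 * max_depth) is just another resize, with no allocation. */
		sbitmap_queue_resize(&demo_tags, active_depth);
		return 0;
	}

	static void demo_exit(void)
	{
		sbitmap_queue_free(&demo_tags);
	}
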
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index 5b18ab915c65..aff037cfd8e7 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -5,6 +5,8 @@
#include "blk-mq.h"
#include "blk-mq-tag.h"
+#define MAX_SCHED_RQ (16 * BLKDEV_MAX_RQ)
+
void blk_mq_sched_assign_ioc(struct request *rq);
bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
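
For scale: BLKDEV_MAX_RQ is 128 in kernels of this vintage, so MAX_SCHED_RQ works out to 16 * 128 = 2048 tags, the same 16 * BLKDEV_MAX_RQ ceiling that blk_mq_tag_update_depth() previously open-coded (see the blk-mq-tag.c hunk below).
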
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index f597d40de10b..86f87346232a 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -13,6 +13,7 @@
#include <linux/delay.h>
#include "blk.h"
#include "blk-mq.h"
+#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
/*
@@ -590,8 +591,6 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
*/
if (tdepth > tags->nr_tags) {
struct blk_mq_tag_set *set = hctx->queue->tag_set;
- /* Only sched tags can grow, so clear HCTX_SHARED flag */
- unsigned int flags = set->flags & ~BLK_MQ_F_TAG_HCTX_SHARED;
struct blk_mq_tags *new;
bool ret;
@@ -602,21 +601,21 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
* We need some sort of upper limit, set it high enough that
* no valid use cases should require more.
*/
- if (tdepth > 16 * BLKDEV_MAX_RQ)
+ if (tdepth > MAX_SCHED_RQ)
return -EINVAL;
new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth,
- tags->nr_reserved_tags, flags);
+ tags->nr_reserved_tags, set->flags);
if (!new)
return -ENOMEM;
ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
if (ret) {
- blk_mq_free_rq_map(new, flags);
+ blk_mq_free_rq_map(new, set->flags);
return -ENOMEM;
}
blk_mq_free_rqs(set, *tagsptr, hctx->queue_num);
- blk_mq_free_rq_map(*tagsptr, flags);
+ blk_mq_free_rq_map(*tagsptr, set->flags);
*tagsptr = new;
} else {
/*
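
Worth noting for the next hunk: when the depth grows, blk_mq_tag_update_depth() replaces *tagsptr with a freshly allocated struct blk_mq_tags whose bitmap_tags/breserved_tags pointers do not yet reference the queue-wide sbitmaps. That is why the blk-mq.c change below re-points them immediately after the call.
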
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 001e196bdebd..f11d4018ce2e 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -3640,15 +3640,24 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
} else {
ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
nr, true);
+ if (blk_mq_is_sbitmap_shared(set->flags)) {
+ hctx->sched_tags->bitmap_tags =
+ &q->sched_bitmap_tags;
+ hctx->sched_tags->breserved_tags =
+ &q->sched_breserved_tags;
+ }
}
if (ret)
break;
if (q->elevator && q->elevator->type->ops.depth_updated)
q->elevator->type->ops.depth_updated(hctx);
}
-
- if (!ret)
+ if (!ret) {
q->nr_requests = nr;
+ if (q->elevator && blk_mq_is_sbitmap_shared(set->flags))
+ sbitmap_queue_resize(&q->sched_bitmap_tags,
+ nr - set->reserved_tags);
+ }
blk_mq_unquiesce_queue(q);
blk_mq_unfreeze_queue(q);
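
blk_mq_update_nr_requests() is the sysfs path behind writes such as echo 256 > /sys/block/<dev>/queue/nr_requests. With a shared sbitmap, the new scheduler tag depth now takes effect through one sbitmap_queue_resize() on the queue-wide bitmap, applied after every hctx's tag map has been updated.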