author    Christoph Hellwig <hch@lst.de>	2022-10-30 11:07:14 +0100
committer Jens Axboe <axboe@kernel.dk>	2022-11-01 09:12:24 -0600
commit    64b36075eb0e50af6f59047b5f698a9f2bb2b4fd
tree      411815f10bee1d8f5ce412a9c0a099b21d93b878
parent    ffb86425ee2cadbe573c483b789aab2dd57aeb7b
block: split elevator_switch
Split an elevator_disable helper from elevator_switch for the case where we
want to switch to no scheduler at all. This includes removing the pointless
elevator_switch_mq helper and removing the switch to no schedule logic from
blk_mq_init_sched.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20221030100714.876891-8-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
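[Editor's note] The following is a minimal user-space sketch of the refactoring pattern this patch applies, using hypothetical names (switch_sched, disable_sched, change_sched), not the kernel API: the "NULL scheduler" special case is pulled out of the generic switch path into a dedicated disable helper, so each path handles exactly one job.

/* Sketch only: hypothetical names, not the block layer interfaces. */
#include <stdio.h>
#include <string.h>

struct sched { const char *name; };

struct queue {
	const struct sched *sched;	/* NULL means no scheduler */
	unsigned int queue_depth;	/* hardware queue depth */
	unsigned int nr_requests;
};

/* switching now always takes a real scheduler ... */
static void switch_sched(struct queue *q, const struct sched *new_sched)
{
	q->sched = new_sched;
	q->nr_requests = 2 * q->queue_depth;	/* scheduler-specific sizing */
	printf("elv switch: %s\n", new_sched->name);
}

/* ... and turning scheduling off is its own helper */
static void disable_sched(struct queue *q)
{
	q->sched = NULL;
	q->nr_requests = q->queue_depth;	/* fall back to hw queue depth */
	printf("elv switch: none\n");
}

/* caller-side pattern, mirroring the elevator_change() hunk below */
static void change_sched(struct queue *q, const char *name,
			 const struct sched *available)
{
	if (!strcmp(name, "none")) {
		if (q->sched)
			disable_sched(q);
		return;
	}
	switch_sched(q, available);
}

int main(void)
{
	const struct sched deadline = { .name = "mq-deadline" };
	struct queue q = { .sched = NULL, .queue_depth = 64, .nr_requests = 64 };

	change_sched(&q, "mq-deadline", &deadline);
	change_sched(&q, "none", &deadline);
	return 0;
}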
Diffstat (limited to 'block')
-rw-r--r--  block/blk-mq-sched.c |  7
-rw-r--r--  block/blk-mq.c       |  2
-rw-r--r--  block/blk.h          |  1
-rw-r--r--  block/elevator.c     | 77
4 files changed, 40 insertions(+), 47 deletions(-)
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 68227240fdea..23d1a90fec42 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -564,13 +564,6 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
 	unsigned long i;
 	int ret;
 
-	if (!e) {
-		blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q);
-		q->elevator = NULL;
-		q->nr_requests = q->tag_set->queue_depth;
-		return 0;
-	}
-
 	/*
 	 * Default to double of smaller one between hw queue_depth and 128,
 	 * since we don't split into sync/async like the old code did.
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 623e8a506539..a78538586a40 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -4588,7 +4588,7 @@ static bool blk_mq_elv_switch_none(struct list_head *head,
 	__elevator_get(qe->type);
 	qe->type = q->elevator->type;
 	list_add(&qe->node, head);
-	elevator_switch(q, NULL);
+	elevator_disable(q);
 	mutex_unlock(&q->sysfs_lock);
 
 	return true;
diff --git a/block/blk.h b/block/blk.h
index 7f9e089ab1f7..f1398fb96cec 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -278,6 +278,7 @@ bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
 void blk_insert_flush(struct request *rq);
 
 int elevator_switch(struct request_queue *q, struct elevator_type *new_e);
+void elevator_disable(struct request_queue *q);
 void elevator_exit(struct request_queue *q);
 int elv_register_queue(struct request_queue *q, bool uevent);
 void elv_unregister_queue(struct request_queue *q);
diff --git a/block/elevator.c b/block/elevator.c
index 5781f5d50bb8..800e0038be0d 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -554,39 +554,6 @@ void elv_unregister(struct elevator_type *e)
 }
 EXPORT_SYMBOL_GPL(elv_unregister);
 
-static int elevator_switch_mq(struct request_queue *q,
-		struct elevator_type *new_e)
-{
-	int ret;
-
-	lockdep_assert_held(&q->sysfs_lock);
-
-	if (q->elevator) {
-		elv_unregister_queue(q);
-		elevator_exit(q);
-	}
-
-	ret = blk_mq_init_sched(q, new_e);
-	if (ret)
-		goto out;
-
-	if (new_e) {
-		ret = elv_register_queue(q, true);
-		if (ret) {
-			elevator_exit(q);
-			goto out;
-		}
-	}
-
-	if (new_e)
-		blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
-	else
-		blk_add_trace_msg(q, "elv switch: none");
-
-out:
-	return ret;
-}
-
 static inline bool elv_support_iosched(struct request_queue *q)
 {
 	if (!queue_is_mq(q) ||
@@ -691,19 +658,51 @@ void elevator_init_mq(struct request_queue *q)
  */
 int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 {
-	int err;
+	int ret;
 
 	lockdep_assert_held(&q->sysfs_lock);
 
 	blk_mq_freeze_queue(q);
 	blk_mq_quiesce_queue(q);
 
-	err = elevator_switch_mq(q, new_e);
+	if (q->elevator) {
+		elv_unregister_queue(q);
+		elevator_exit(q);
+	}
 
+	ret = blk_mq_init_sched(q, new_e);
+	if (ret)
+		goto out_unfreeze;
+
+	ret = elv_register_queue(q, true);
+	if (ret) {
+		elevator_exit(q);
+		goto out_unfreeze;
+	}
+	blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
+
+out_unfreeze:
 	blk_mq_unquiesce_queue(q);
 	blk_mq_unfreeze_queue(q);
+	return ret;
+}
+
+void elevator_disable(struct request_queue *q)
+{
+	lockdep_assert_held(&q->sysfs_lock);
 
-	return err;
+	blk_mq_freeze_queue(q);
+	blk_mq_quiesce_queue(q);
+
+	elv_unregister_queue(q);
+	elevator_exit(q);
+	blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q);
+	q->elevator = NULL;
+	q->nr_requests = q->tag_set->queue_depth;
+	blk_add_trace_msg(q, "elv switch: none");
+
+	blk_mq_unquiesce_queue(q);
+	blk_mq_unfreeze_queue(q);
 }
 
 /*
@@ -722,9 +721,9 @@ static int elevator_change(struct request_queue *q, const char *elevator_name)
 	 * Special case for mq, turn off scheduling
 	 */
 	if (!strncmp(elevator_name, "none", 4)) {
-		if (!q->elevator)
-			return 0;
-		return elevator_switch(q, NULL);
+		if (q->elevator)
+			elevator_disable(q);
+		return 0;
 	}
 
 	if (q->elevator && elevator_match(q->elevator->type, elevator_name))