Diffstat (limited to 'block/elevator.c')
-rw-r--r--  block/elevator.c  521
1 file changed, 323 insertions, 198 deletions
diff --git a/block/elevator.c b/block/elevator.c
index f05e90d4e695..5b37ef44f52d 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Block device elevator/IO-scheduler.
*
@@ -25,7 +26,6 @@
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
-#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -35,14 +35,15 @@
#include <linux/hash.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>
-#include <linux/blk-cgroup.h>
#include <trace/events/block.h>
+#include "elevator.h"
#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-pm.h"
#include "blk-wbt.h"
+#include "blk-cgroup.h"
static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);
@@ -56,7 +57,7 @@ static LIST_HEAD(elv_list);
* Query io scheduler to see if the current process issuing bio may be
* merged with rq.
*/
-static int elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
+static bool elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
{
struct request_queue *q = rq->q;
struct elevator_queue *e = q->elevator;
@@ -64,7 +65,7 @@ static int elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
if (e->type->ops.allow_merge)
return e->type->ops.allow_merge(q, rq, bio);
- return 1;
+ return true;
}
/*
@@ -82,76 +83,45 @@ bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
}
EXPORT_SYMBOL(elv_bio_merge_ok);
+/**
+ * elevator_match - Check whether @e's name or alias matches @name
+ * @e: Scheduler to test
+ * @name: Elevator name to test
+ *
+ * Return true if the elevator @e's name or alias matches @name.
+ */
static bool elevator_match(const struct elevator_type *e, const char *name)
{
- if (!strcmp(e->elevator_name, name))
- return true;
- if (e->elevator_alias && !strcmp(e->elevator_alias, name))
- return true;
-
- return false;
+ return !strcmp(e->elevator_name, name) ||
+ (e->elevator_alias && !strcmp(e->elevator_alias, name));
}
-/*
- * Return scheduler with name 'name'
- */
-static struct elevator_type *elevator_find(const char *name)
+static struct elevator_type *__elevator_find(const char *name)
{
struct elevator_type *e;
- list_for_each_entry(e, &elv_list, list) {
+ list_for_each_entry(e, &elv_list, list)
if (elevator_match(e, name))
return e;
- }
-
return NULL;
}
-static void elevator_put(struct elevator_type *e)
-{
- module_put(e->elevator_owner);
-}
-
-static struct elevator_type *elevator_get(struct request_queue *q,
- const char *name, bool try_loading)
+static struct elevator_type *elevator_find_get(const char *name)
{
struct elevator_type *e;
spin_lock(&elv_list_lock);
-
- e = elevator_find(name);
- if (!e && try_loading) {
- spin_unlock(&elv_list_lock);
- request_module("%s-iosched", name);
- spin_lock(&elv_list_lock);
- e = elevator_find(name);
- }
-
- if (e && !try_module_get(e->elevator_owner))
+ e = __elevator_find(name);
+ if (e && (!elevator_tryget(e)))
e = NULL;
-
spin_unlock(&elv_list_lock);
return e;
}
-static char chosen_elevator[ELV_NAME_MAX];
-
-static int __init elevator_setup(char *str)
-{
- /*
- * Be backwards-compatible with previous kernels, so users
- * won't get the wrong elevator.
- */
- strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
- return 1;
-}
-
-__setup("elevator=", elevator_setup);
-
-static struct kobj_type elv_ktype;
+static const struct kobj_type elv_ktype;
struct elevator_queue *elevator_alloc(struct request_queue *q,
- struct elevator_type *e)
+ struct elevator_type *e, struct elevator_resources *res)
{
struct elevator_queue *eq;
@@ -159,14 +129,16 @@ struct elevator_queue *elevator_alloc(struct request_queue *q,
if (unlikely(!eq))
return NULL;
+ __elevator_get(e);
eq->type = e;
kobject_init(&eq->kobj, &elv_ktype);
mutex_init(&eq->sysfs_lock);
hash_init(eq->hash);
+ eq->et = res->et;
+ eq->elevator_data = res->data;
return eq;
}
-EXPORT_SYMBOL(elevator_alloc);
static void elevator_release(struct kobject *kobj)
{
@@ -177,14 +149,17 @@ static void elevator_release(struct kobject *kobj)
kfree(e);
}
-void elevator_exit(struct request_queue *q, struct elevator_queue *e)
+static void elevator_exit(struct request_queue *q)
{
+ struct elevator_queue *e = q->elevator;
+
+ lockdep_assert_held(&q->elevator_lock);
+
+ ioc_clear_queue(q);
+
mutex_lock(&e->sysfs_lock);
- if (e->type->ops.exit_sched)
- blk_mq_exit_sched(q, e);
+ blk_mq_exit_sched(q, e);
mutex_unlock(&e->sysfs_lock);
-
- kobject_put(&e->kobj);
}
static inline void __elv_rqhash_del(struct request *rq)
@@ -326,6 +301,9 @@ enum elv_merge elv_merge(struct request_queue *q, struct request **req,
__rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
if (__rq && elv_bio_merge_ok(__rq, bio)) {
*req = __rq;
+
+ if (blk_discard_mergable(__rq))
+ return ELEVATOR_DISCARD_MERGE;
return ELEVATOR_BACK_MERGE;
}
@@ -340,9 +318,11 @@ enum elv_merge elv_merge(struct request_queue *q, struct request **req,
* we can append 'rq' to an existing request, so we can throw 'rq' away
* afterwards.
*
- * Returns true if we merged, false otherwise
+ * Returns true if we merged, false otherwise. 'free' will contain all
+ * requests that need to be freed.
*/
-bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq)
+bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq,
+ struct list_head *free)
{
struct request *__rq;
bool ret;
@@ -353,8 +333,10 @@ bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq)
/*
* First try one-hit cache.
*/
- if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
+ if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq)) {
+ list_add(&rq->queuelist, free);
return true;
+ }
if (blk_queue_noxmerges(q))
return false;
@@ -368,6 +350,7 @@ bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq)
if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
break;
+ list_add(&rq->queuelist, free);
/* The merged request could be merged with others, try again */
ret = true;
rq = __rq;
@@ -422,21 +405,22 @@ struct request *elv_former_request(struct request_queue *q, struct request *rq)
return NULL;
}
-#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)
+#define to_elv(atr) container_of_const((atr), struct elv_fs_entry, attr)
static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
- struct elv_fs_entry *entry = to_elv(attr);
+ const struct elv_fs_entry *entry = to_elv(attr);
struct elevator_queue *e;
- ssize_t error;
+ ssize_t error = -ENODEV;
if (!entry->show)
return -EIO;
e = container_of(kobj, struct elevator_queue, kobj);
mutex_lock(&e->sysfs_lock);
- error = e->type ? entry->show(e, page) : -ENOENT;
+ if (!test_bit(ELEVATOR_FLAG_DYING, &e->flags))
+ error = entry->show(e, page);
mutex_unlock(&e->sysfs_lock);
return error;
}
@@ -445,16 +429,17 @@ static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
const char *page, size_t length)
{
- struct elv_fs_entry *entry = to_elv(attr);
+ const struct elv_fs_entry *entry = to_elv(attr);
struct elevator_queue *e;
- ssize_t error;
+ ssize_t error = -ENODEV;
if (!entry->store)
return -EIO;
e = container_of(kobj, struct elevator_queue, kobj);
mutex_lock(&e->sysfs_lock);
- error = e->type ? entry->store(e, page, length) : -ENOENT;
+ if (!test_bit(ELEVATOR_FLAG_DYING, &e->flags))
+ error = entry->store(e, page, length);
mutex_unlock(&e->sysfs_lock);
return error;
}
@@ -464,21 +449,20 @@ static const struct sysfs_ops elv_sysfs_ops = {
.store = elv_attr_store,
};
-static struct kobj_type elv_ktype = {
+static const struct kobj_type elv_ktype = {
.sysfs_ops = &elv_sysfs_ops,
.release = elevator_release,
};
-int elv_register_queue(struct request_queue *q)
+static int elv_register_queue(struct request_queue *q,
+ struct elevator_queue *e,
+ bool uevent)
{
- struct elevator_queue *e = q->elevator;
int error;
- lockdep_assert_held(&q->sysfs_lock);
-
- error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
+ error = kobject_add(&e->kobj, &q->disk->queue_kobj, "iosched");
if (!error) {
- struct elv_fs_entry *attr = e->type->elevator_attrs;
+ const struct elv_fs_entry *attr = e->type->elevator_attrs;
if (attr) {
while (attr->attr.name) {
if (sysfs_create_file(&e->kobj, &attr->attr))
@@ -486,30 +470,39 @@ int elv_register_queue(struct request_queue *q)
attr++;
}
}
- kobject_uevent(&e->kobj, KOBJ_ADD);
- e->registered = 1;
+ if (uevent)
+ kobject_uevent(&e->kobj, KOBJ_ADD);
+
+ /*
+ * Sched is initialized, it is ready to export it via
+ * debugfs
+ */
+ blk_mq_sched_reg_debugfs(q);
+ set_bit(ELEVATOR_FLAG_REGISTERED, &e->flags);
}
return error;
}
-void elv_unregister_queue(struct request_queue *q)
+static void elv_unregister_queue(struct request_queue *q,
+ struct elevator_queue *e)
{
- lockdep_assert_held(&q->sysfs_lock);
-
- if (q) {
- struct elevator_queue *e = q->elevator;
-
+ if (e && test_and_clear_bit(ELEVATOR_FLAG_REGISTERED, &e->flags)) {
kobject_uevent(&e->kobj, KOBJ_REMOVE);
kobject_del(&e->kobj);
- e->registered = 0;
- /* Re-enable throttling in case elevator disabled it */
- wbt_enable_default(q);
+
+ /* unexport via debugfs before exiting sched */
+ blk_mq_sched_unreg_debugfs(q);
}
}
int elv_register(struct elevator_type *e)
{
- char *def = "";
+ /* finish request is mandatory */
+ if (WARN_ON_ONCE(!e->ops.finish_request))
+ return -EINVAL;
+ /* insert_requests and dispatch_request are mandatory */
+ if (WARN_ON_ONCE(!e->ops.insert_requests || !e->ops.dispatch_request))
+ return -EINVAL;
/* create icq_cache if requested */
if (e->icq_size) {
@@ -527,7 +520,7 @@ int elv_register(struct elevator_type *e)
/* register, don't allow duplicate names */
spin_lock(&elv_list_lock);
- if (elevator_find(e->elevator_name)) {
+ if (__elevator_find(e->elevator_name)) {
spin_unlock(&elv_list_lock);
kmem_cache_destroy(e->icq_cache);
return -EBUSY;
@@ -535,8 +528,8 @@ int elv_register(struct elevator_type *e)
list_add_tail(&e->list, &elv_list);
spin_unlock(&elv_list_lock);
- printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
- def);
+ printk(KERN_INFO "io scheduler %s registered\n", e->elevator_name);
+
return 0;
}
EXPORT_SYMBOL_GPL(elv_register);
@@ -560,181 +553,304 @@ void elv_unregister(struct elevator_type *e)
}
EXPORT_SYMBOL_GPL(elv_unregister);
-int elevator_switch_mq(struct request_queue *q,
- struct elevator_type *new_e)
+/*
+ * Switch to new_e io scheduler.
+ *
+ * If switching fails, we are most likely running out of memory and not able
+ * to restore the old io scheduler, so leaving the io scheduler being none.
+ */
+static int elevator_switch(struct request_queue *q, struct elv_change_ctx *ctx)
{
- int ret;
+ struct elevator_type *new_e = NULL;
+ int ret = 0;
- lockdep_assert_held(&q->sysfs_lock);
+ WARN_ON_ONCE(q->mq_freeze_depth == 0);
+ lockdep_assert_held(&q->elevator_lock);
- if (q->elevator) {
- if (q->elevator->registered)
- elv_unregister_queue(q);
- ioc_clear_queue(q);
- elevator_exit(q, q->elevator);
+ if (strncmp(ctx->name, "none", 4)) {
+ new_e = elevator_find_get(ctx->name);
+ if (!new_e)
+ return -EINVAL;
}
- ret = blk_mq_init_sched(q, new_e);
- if (ret)
- goto out;
+ blk_mq_quiesce_queue(q);
+
+ if (q->elevator) {
+ ctx->old = q->elevator;
+ elevator_exit(q);
+ }
if (new_e) {
- ret = elv_register_queue(q);
- if (ret) {
- elevator_exit(q, q->elevator);
- goto out;
- }
+ ret = blk_mq_init_sched(q, new_e, &ctx->res);
+ if (ret)
+ goto out_unfreeze;
+ ctx->new = q->elevator;
+ } else {
+ blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q);
+ q->elevator = NULL;
+ q->nr_requests = q->tag_set->queue_depth;
+ }
+ blk_add_trace_msg(q, "elv switch: %s", ctx->name);
+
+out_unfreeze:
+ blk_mq_unquiesce_queue(q);
+
+ if (ret) {
+ pr_warn("elv: switch to \"%s\" failed, falling back to \"none\"\n",
+ new_e->elevator_name);
}
if (new_e)
- blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
- else
- blk_add_trace_msg(q, "elv switch: none");
+ elevator_put(new_e);
+ return ret;
+}
+
+static void elv_exit_and_release(struct elv_change_ctx *ctx,
+ struct request_queue *q)
+{
+ struct elevator_queue *e;
+ unsigned memflags;
+
+ memflags = blk_mq_freeze_queue(q);
+ mutex_lock(&q->elevator_lock);
+ e = q->elevator;
+ elevator_exit(q);
+ mutex_unlock(&q->elevator_lock);
+ blk_mq_unfreeze_queue(q, memflags);
+ if (e) {
+ blk_mq_free_sched_res(&ctx->res, ctx->type, q->tag_set);
+ kobject_put(&e->kobj);
+ }
+}
+
+static int elevator_change_done(struct request_queue *q,
+ struct elv_change_ctx *ctx)
+{
+ int ret = 0;
-out:
+ if (ctx->old) {
+ struct elevator_resources res = {
+ .et = ctx->old->et,
+ .data = ctx->old->elevator_data
+ };
+ bool enable_wbt = test_bit(ELEVATOR_FLAG_ENABLE_WBT_ON_EXIT,
+ &ctx->old->flags);
+
+ elv_unregister_queue(q, ctx->old);
+ blk_mq_free_sched_res(&res, ctx->old->type, q->tag_set);
+ kobject_put(&ctx->old->kobj);
+ if (enable_wbt)
+ wbt_enable_default(q->disk);
+ }
+ if (ctx->new) {
+ ret = elv_register_queue(q, ctx->new, !ctx->no_uevent);
+ if (ret)
+ elv_exit_and_release(ctx, q);
+ }
return ret;
}
/*
- * For blk-mq devices, we default to using mq-deadline, if available, for single
- * queue devices. If deadline isn't available OR we have multiple queues,
- * default to "none".
+ * Switch this queue to the given IO scheduler.
*/
-int elevator_init_mq(struct request_queue *q)
+static int elevator_change(struct request_queue *q, struct elv_change_ctx *ctx)
{
- struct elevator_type *e;
- int err = 0;
+ unsigned int memflags;
+ struct blk_mq_tag_set *set = q->tag_set;
+ int ret = 0;
- if (q->nr_hw_queues != 1)
- return 0;
+ lockdep_assert_held(&set->update_nr_hwq_lock);
+ if (strncmp(ctx->name, "none", 4)) {
+ ret = blk_mq_alloc_sched_res(q, ctx->type, &ctx->res,
+ set->nr_hw_queues);
+ if (ret)
+ return ret;
+ }
+
+ memflags = blk_mq_freeze_queue(q);
/*
- * q->sysfs_lock must be held to provide mutual exclusion between
- * elevator_switch() and here.
+ * May be called before adding disk, when there isn't any FS I/O,
+ * so freezing queue plus canceling dispatch work is enough to
+ * drain any dispatch activities originated from passthrough
+ * requests, then no need to quiesce queue which may add long boot
+ * latency, especially when lots of disks are involved.
+ *
+ * Disk isn't added yet, so verifying queue lock only manually.
*/
- mutex_lock(&q->sysfs_lock);
- if (unlikely(q->elevator))
- goto out_unlock;
+ blk_mq_cancel_work_sync(q);
+ mutex_lock(&q->elevator_lock);
+ if (!(q->elevator && elevator_match(q->elevator->type, ctx->name)))
+ ret = elevator_switch(q, ctx);
+ mutex_unlock(&q->elevator_lock);
+ blk_mq_unfreeze_queue(q, memflags);
+ if (!ret)
+ ret = elevator_change_done(q, ctx);
- e = elevator_get(q, "mq-deadline", false);
- if (!e)
- goto out_unlock;
+ /*
+ * Free sched resource if it's allocated but we couldn't switch elevator.
+ */
+ if (!ctx->new)
+ blk_mq_free_sched_res(&ctx->res, ctx->type, set);
- err = blk_mq_init_sched(q, e);
- if (err)
- elevator_put(e);
-out_unlock:
- mutex_unlock(&q->sysfs_lock);
- return err;
+ return ret;
}
-
/*
- * switch to new_e io scheduler. be careful not to introduce deadlocks -
- * we don't free the old io scheduler, before we have allocated what we
- * need for the new one. this way we have a chance of going back to the old
- * one, if the new one fails init for some reason.
+ * The I/O scheduler depends on the number of hardware queues, this forces a
+ * reattachment when nr_hw_queues changes.
*/
-static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
+void elv_update_nr_hw_queues(struct request_queue *q,
+ struct elv_change_ctx *ctx)
{
- int err;
+ struct blk_mq_tag_set *set = q->tag_set;
+ int ret = -ENODEV;
- lockdep_assert_held(&q->sysfs_lock);
+ WARN_ON_ONCE(q->mq_freeze_depth == 0);
- blk_mq_freeze_queue(q);
- blk_mq_quiesce_queue(q);
-
- err = elevator_switch_mq(q, new_e);
-
- blk_mq_unquiesce_queue(q);
- blk_mq_unfreeze_queue(q);
+ if (ctx->type && !blk_queue_dying(q) && blk_queue_registered(q)) {
+ mutex_lock(&q->elevator_lock);
+ /* force to reattach elevator after nr_hw_queue is updated */
+ ret = elevator_switch(q, ctx);
+ mutex_unlock(&q->elevator_lock);
+ }
+ blk_mq_unfreeze_queue_nomemrestore(q);
+ if (!ret)
+ WARN_ON_ONCE(elevator_change_done(q, ctx));
- return err;
+ /*
+ * Free sched resource if it's allocated but we couldn't switch elevator.
+ */
+ if (!ctx->new)
+ blk_mq_free_sched_res(&ctx->res, ctx->type, set);
}
/*
- * Switch this queue to the given IO scheduler.
+ * Use the default elevator settings. If the chosen elevator initialization
+ * fails, fall back to the "none" elevator (no elevator).
*/
-static int __elevator_change(struct request_queue *q, const char *name)
+void elevator_set_default(struct request_queue *q)
{
- char elevator_name[ELV_NAME_MAX];
- struct elevator_type *e;
+ struct elv_change_ctx ctx = {
+ .name = "mq-deadline",
+ .no_uevent = true,
+ };
+ int err;
- /* Make sure queue is not in the middle of being removed */
- if (!test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags))
- return -ENOENT;
+ /* now we allow to switch elevator */
+ blk_queue_flag_clear(QUEUE_FLAG_NO_ELV_SWITCH, q);
+
+ if (q->tag_set->flags & BLK_MQ_F_NO_SCHED_BY_DEFAULT)
+ return;
/*
- * Special case for mq, turn off scheduling
+ * For single queue devices, default to using mq-deadline. If we
+ * have multiple queues or mq-deadline is not available, default
+ * to "none".
*/
- if (!strncmp(name, "none", 4))
- return elevator_switch(q, NULL);
-
- strlcpy(elevator_name, name, sizeof(elevator_name));
- e = elevator_get(q, strstrip(elevator_name), true);
- if (!e)
- return -EINVAL;
-
- if (q->elevator && elevator_match(q->elevator->type, elevator_name)) {
- elevator_put(e);
- return 0;
+ ctx.type = elevator_find_get(ctx.name);
+ if (!ctx.type)
+ return;
+
+ if ((q->nr_hw_queues == 1 ||
+ blk_mq_is_shared_tags(q->tag_set->flags))) {
+ err = elevator_change(q, &ctx);
+ if (err < 0)
+ pr_warn("\"%s\" elevator initialization, failed %d, falling back to \"none\"\n",
+ ctx.name, err);
}
+ elevator_put(ctx.type);
+}
- return elevator_switch(q, e);
+void elevator_set_none(struct request_queue *q)
+{
+ struct elv_change_ctx ctx = {
+ .name = "none",
+ };
+ int err;
+
+ err = elevator_change(q, &ctx);
+ if (err < 0)
+ pr_warn("%s: set none elevator failed %d\n", __func__, err);
}
-static inline bool elv_support_iosched(struct request_queue *q)
+static void elv_iosched_load_module(const char *elevator_name)
{
- if (q->tag_set && (q->tag_set->flags & BLK_MQ_F_NO_SCHED))
- return false;
- return true;
+ struct elevator_type *found;
+
+ spin_lock(&elv_list_lock);
+ found = __elevator_find(elevator_name);
+ spin_unlock(&elv_list_lock);
+
+ if (!found)
+ request_module("%s-iosched", elevator_name);
}
-ssize_t elv_iosched_store(struct request_queue *q, const char *name,
+ssize_t elv_iosched_store(struct gendisk *disk, const char *buf,
size_t count)
{
+ char elevator_name[ELV_NAME_MAX];
+ struct elv_change_ctx ctx = {};
int ret;
+ struct request_queue *q = disk->queue;
+ struct blk_mq_tag_set *set = q->tag_set;
- if (!queue_is_mq(q) || !elv_support_iosched(q))
- return count;
+ /* Make sure queue is not in the middle of being removed */
+ if (!blk_queue_registered(q))
+ return -ENOENT;
- ret = __elevator_change(q, name);
- if (!ret)
- return count;
+ /*
+ * If the attribute needs to load a module, do it before freezing the
+ * queue to ensure that the module file can be read when the request
+ * queue is the one for the device storing the module file.
+ */
+ strscpy(elevator_name, buf, sizeof(elevator_name));
+ ctx.name = strstrip(elevator_name);
+
+ elv_iosched_load_module(ctx.name);
+ ctx.type = elevator_find_get(ctx.name);
+
+ down_read(&set->update_nr_hwq_lock);
+ if (!blk_queue_no_elv_switch(q)) {
+ ret = elevator_change(q, &ctx);
+ if (!ret)
+ ret = count;
+ } else {
+ ret = -ENOENT;
+ }
+ up_read(&set->update_nr_hwq_lock);
+ if (ctx.type)
+ elevator_put(ctx.type);
return ret;
}
-ssize_t elv_iosched_show(struct request_queue *q, char *name)
+ssize_t elv_iosched_show(struct gendisk *disk, char *name)
{
- struct elevator_queue *e = q->elevator;
- struct elevator_type *elv = NULL;
- struct elevator_type *__e;
+ struct request_queue *q = disk->queue;
+ struct elevator_type *cur = NULL, *e;
int len = 0;
- if (!queue_is_mq(q))
- return sprintf(name, "none\n");
-
- if (!q->elevator)
+ mutex_lock(&q->elevator_lock);
+ if (!q->elevator) {
len += sprintf(name+len, "[none] ");
- else
- elv = e->type;
+ } else {
+ len += sprintf(name+len, "none ");
+ cur = q->elevator->type;
+ }
spin_lock(&elv_list_lock);
- list_for_each_entry(__e, &elv_list, list) {
- if (elv && elevator_match(elv, __e->elevator_name)) {
- len += sprintf(name+len, "[%s] ", elv->elevator_name);
- continue;
- }
- if (elv_support_iosched(q))
- len += sprintf(name+len, "%s ", __e->elevator_name);
+ list_for_each_entry(e, &elv_list, list) {
+ if (e == cur)
+ len += sprintf(name+len, "[%s] ", e->elevator_name);
+ else
+ len += sprintf(name+len, "%s ", e->elevator_name);
}
spin_unlock(&elv_list_lock);
- if (q->elevator)
- len += sprintf(name+len, "none");
+ len += sprintf(name+len, "\n");
+ mutex_unlock(&q->elevator_lock);
- len += sprintf(len+name, "\n");
return len;
}
@@ -761,3 +877,12 @@ struct request *elv_rb_latter_request(struct request_queue *q,
return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);
+
+static int __init elevator_setup(char *str)
+{
+ pr_warn("Kernel parameter elevator= does not have any effect anymore.\n"
+ "Please use sysfs to set IO scheduler for individual devices.\n");
+ return 1;
+}
+
+__setup("elevator=", elevator_setup);