author     Linus Torvalds <torvalds@linux-foundation.org>  2017-05-06 11:25:08 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-05-06 11:25:08 -0700
commit     044f1daaaaf7c86bc4fcf433848b7baae236946b
tree       55a5b94c75cc6e51992ee3b5d7c49878c7ae7760 /block/blk-mq-sched.c
parent     d557d1b58b3546bab2c5bc2d624c5709840e6b10
parent     daaadb3e9453ab89c2e113a2d1df8e19e30944cc
Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull block fixes and updates from Jens Axboe:
 "Some fixes and follow-up features/changes that should go in this merge
  window. This contains:

   - Two fixes for lightnvm from Javier, fixing problems in the new code
     merged previously in this merge window.

   - A fix from Jan for the backing device changes, fixing an issue in
     NFS that causes a failure to mount on certain setups.

   - A change from Christoph, cleaning up the blk-mq init and exit
     request paths.

   - Removal of elevator_change(), which is now unused. From Bart.

   - A fix for queue operation invocation on a dead queue, from Bart.

   - A series fixing up mtip32xx for blk-mq scheduling, removing a
     bandaid we previously had in place for this. From me.

   - A regression fix for this series, fixing a case where we wait on
     workqueue flushing from an invalid (non-blocking) context. From me.

   - A fix/optimization from Ming, ensuring that we don't both quiesce
     and freeze a queue at the same time.

   - A fix from Peter on lock ordering for CPU hotplug. Not a real
     problem right now, but it will be once the CPU hotplug rework goes
     in.

   - A series from Omar, cleaning up our blk-mq debugfs support and
     adding support for exporting info from schedulers in debugfs as
     well. This is really useful in debugging stalls or livelocks."

* 'for-linus' of git://git.kernel.dk/linux-block: (28 commits)
  mq-deadline: add debugfs attributes
  kyber: add debugfs attributes
  blk-mq-debugfs: allow schedulers to register debugfs attributes
  blk-mq: untangle debugfs and sysfs
  blk-mq: move debugfs declarations to a separate header file
  blk-mq: Do not invoke queue operations on a dead queue
  blk-mq-debugfs: get rid of a bunch of boilerplate
  blk-mq-debugfs: rename hw queue directories from <n> to hctx<n>
  blk-mq-debugfs: don't open code strstrip()
  blk-mq-debugfs: error on long write to queue "state" file
  blk-mq-debugfs: clean up flag definitions
  blk-mq-debugfs: separate flags with |
  nfs: Fix bdi handling for cloned superblocks
  block/mq: Cure cpu hotplug lock inversion
  lightnvm: fix bad back free on error path
  lightnvm: create cmd before allocating request
  blk-mq: don't use sync workqueue flushing from drivers
  mtip32xx: convert internal commands to regular block infrastructure
  mtip32xx: cleanup internal tag assumptions
  block: don't call blk_mq_quiesce_queue() after queue is frozen
  ...
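The debugfs series from Omar lets an mq scheduler publish internal state under each queue's debugfs directory (the new mq-deadline and kyber attributes above). As a rough sketch only, not taken from this diff, the scheduler-side hookup looks roughly like the following; the field layout of struct blk_mq_debugfs_attr, the queue_debugfs_attrs member of struct elevator_type, and the example_* names are assumptions here:

/*
 * Sketch of a scheduler exporting one queue-level debugfs file.
 * API details (struct blk_mq_debugfs_attr layout, elevator_type member
 * names) are assumed, not taken from this diff.
 */
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include "blk-mq-debugfs.h"

/* Queue-level attributes are handed the request_queue as 'data'. */
static int example_depth_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	seq_printf(m, "%lu\n", q->nr_requests);	/* stand-in for real scheduler state */
	return 0;
}

static const struct blk_mq_debugfs_attr example_queue_debugfs_attrs[] = {
	{"depth", 0400, example_depth_show},
	{},	/* terminator */
};

static struct elevator_type example_sched = {
	/* .ops.mq = { ...scheduler callbacks... }, */
#ifdef CONFIG_BLK_DEBUG_FS
	.queue_debugfs_attrs = example_queue_debugfs_attrs,
#endif
	.elevator_name = "example",
	.elevator_owner = THIS_MODULE,
};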
Diffstat (limited to 'block/blk-mq-sched.c')
-rw-r--r--  block/blk-mq-sched.c  30
1 file changed, 17 insertions(+), 13 deletions(-)
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 8b361e192e8a..1f5b692526ae 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -11,6 +11,7 @@
 
 #include "blk.h"
 #include "blk-mq.h"
+#include "blk-mq-debugfs.h"
 #include "blk-mq-sched.h"
 #include "blk-mq-tag.h"
 #include "blk-wbt.h"
@@ -82,11 +83,7 @@ struct request *blk_mq_sched_get_request(struct request_queue *q,
 	if (likely(!data->hctx))
 		data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
 
-	/*
-	 * For a reserved tag, allocate a normal request since we might
-	 * have driver dependencies on the value of the internal tag.
-	 */
-	if (e && !(data->flags & BLK_MQ_REQ_RESERVED)) {
+	if (e) {
 		data->flags |= BLK_MQ_REQ_INTERNAL;
 
 		/*
@@ -476,6 +473,8 @@ int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
 		}
 	}
 
+	blk_mq_debugfs_register_sched_hctx(q, hctx);
+
 	return 0;
 }
@@ -487,6 +486,8 @@ void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
 	if (!e)
 		return;
 
+	blk_mq_debugfs_unregister_sched_hctx(hctx);
+
 	if (e->type->ops.mq.exit_hctx && hctx->sched_data) {
 		e->type->ops.mq.exit_hctx(hctx, hctx_idx);
 		hctx->sched_data = NULL;
@@ -523,8 +524,10 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
 	if (ret)
 		goto err;
 
-	if (e->ops.mq.init_hctx) {
-		queue_for_each_hw_ctx(q, hctx, i) {
+	blk_mq_debugfs_register_sched(q);
+
+	queue_for_each_hw_ctx(q, hctx, i) {
+		if (e->ops.mq.init_hctx) {
 			ret = e->ops.mq.init_hctx(hctx, i);
 			if (ret) {
 				eq = q->elevator;
@@ -533,6 +536,7 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
 				return ret;
 			}
 		}
+		blk_mq_debugfs_register_sched_hctx(q, hctx);
 	}
 
 	return 0;
@@ -548,14 +552,14 @@ void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
 	struct blk_mq_hw_ctx *hctx;
 	unsigned int i;
 
-	if (e->type->ops.mq.exit_hctx) {
-		queue_for_each_hw_ctx(q, hctx, i) {
-			if (hctx->sched_data) {
-				e->type->ops.mq.exit_hctx(hctx, i);
-				hctx->sched_data = NULL;
-			}
+	queue_for_each_hw_ctx(q, hctx, i) {
+		blk_mq_debugfs_unregister_sched_hctx(hctx);
+		if (e->type->ops.mq.exit_hctx && hctx->sched_data) {
+			e->type->ops.mq.exit_hctx(hctx, i);
+			hctx->sched_data = NULL;
 		}
 	}
+	blk_mq_debugfs_unregister_sched(q);
 	if (e->type->ops.mq.exit_sched)
 		e->type->ops.mq.exit_sched(e);
 	blk_mq_sched_tags_teardown(q);
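Taken together, these hunks make debugfs registration symmetric with scheduler setup and teardown: blk_mq_init_sched() registers the queue-level scheduler directory and then each hctx entry once that hctx's init_hctx hook (if any) has succeeded, while blk_mq_exit_sched() unregisters each hctx entry before running the scheduler's exit_hctx hook and drops the queue-level directory before exit_sched. A condensed sketch of that ordering, assuming the declarations from the new blk-mq-debugfs.h header and with error handling and tag setup trimmed:

/*
 * Condensed from the post-merge blk_mq_init_sched()/blk_mq_exit_sched()
 * above; only calls visible in this diff are used.
 */
#include <linux/blk-mq.h>
#include <linux/elevator.h>
#include "blk-mq-debugfs.h"

static void sched_debugfs_setup(struct request_queue *q, struct elevator_type *e)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	blk_mq_debugfs_register_sched(q);		/* queue-level sched dir */

	queue_for_each_hw_ctx(q, hctx, i) {
		if (e->ops.mq.init_hctx)
			e->ops.mq.init_hctx(hctx, i);	/* scheduler first */
		blk_mq_debugfs_register_sched_hctx(q, hctx);	/* then its debugfs dir */
	}
}

static void sched_debugfs_teardown(struct request_queue *q, struct elevator_queue *e)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		blk_mq_debugfs_unregister_sched_hctx(hctx);	/* debugfs entry first */
		if (e->type->ops.mq.exit_hctx && hctx->sched_data) {
			e->type->ops.mq.exit_hctx(hctx, i);	/* then the scheduler */
			hctx->sched_data = NULL;
		}
	}
	blk_mq_debugfs_unregister_sched(q);	/* queue-level dir last */
}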