author     Jens Axboe <axboe@fb.com>  2017-04-07 12:45:20 -0600
committer  Jens Axboe <axboe@fb.com>  2017-04-07 12:45:20 -0600
commit     65f619d2535197d97067eeeef75a40f25b552e69
tree       13f96c51a2b591ed22f1771541998bb77178f685
parent     fbbaf700e7b163a0f1704b2d542ee28be11fce21
parent     6d8c6c0f97ad8a3517c42b179c1dc8e77397d0e2
Merge branch 'for-linus' into for-4.12/block
We've added a considerable amount of fixes for stalls and issues with the blk-mq scheduling in the 4.11 series since forking off the for-4.12/block branch. We need to do improvements on top of that for 4.12, so pull in the previous fixes to make our lives easier going forward.

Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'block/blk-mq-sched.h')
-rw-r--r--  block/blk-mq-sched.h  25
1 file changed, 8 insertions, 17 deletions
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index a75b16b123f7..3a9e6e40558b 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -19,7 +19,7 @@ bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
 			struct request **merged_request);
 bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio);
 bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
-void blk_mq_sched_restart_queues(struct blk_mq_hw_ctx *hctx);
+void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
 
 void blk_mq_sched_insert_request(struct request *rq, bool at_head,
 				 bool run_queue, bool async, bool can_block);
@@ -32,8 +32,13 @@ void blk_mq_sched_move_to_dispatch(struct blk_mq_hw_ctx *hctx,
 				   struct list_head *rq_list,
 				   struct request *(*get_rq)(struct blk_mq_hw_ctx *));
 
-int blk_mq_sched_setup(struct request_queue *q);
-void blk_mq_sched_teardown(struct request_queue *q);
+int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
+void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
+
+int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
+			   unsigned int hctx_idx);
+void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
+			    unsigned int hctx_idx);
 
 int blk_mq_sched_init(struct request_queue *q);
 
@@ -131,20 +136,6 @@ static inline void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
 		set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
 }
 
-/*
- * Mark a hardware queue and the request queue it belongs to as needing a
- * restart.
- */
-static inline void blk_mq_sched_mark_restart_queue(struct blk_mq_hw_ctx *hctx)
-{
-	struct request_queue *q = hctx->queue;
-
-	if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
-		set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
-	if (!test_bit(QUEUE_FLAG_RESTART, &q->queue_flags))
-		set_bit(QUEUE_FLAG_RESTART, &q->queue_flags);
-}
-
 static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
 {
 	return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
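
For readers following the SCHED_RESTART rework above: the header keeps a single per-hctx BLK_MQ_S_SCHED_RESTART flag, set by blk_mq_sched_mark_restart_hctx() when dispatch stalls and tested by blk_mq_sched_needs_restart() once resources free up. Below is a minimal caller sketch, assuming only the two inline helpers shown in this hunk plus the block-internal blk_mq_run_hw_queue(); the example_* functions are hypothetical and not part of this commit.

/*
 * Illustrative sketch only -- not from this commit.  It shows how the
 * BLK_MQ_S_SCHED_RESTART helpers above are typically paired; the
 * example_* function names are hypothetical.
 */
#include <linux/blk-mq.h>	/* struct blk_mq_hw_ctx */
#include "blk-mq.h"		/* block-internal: blk_mq_run_hw_queue() */
#include "blk-mq-sched.h"	/* the inline helpers from this header */

/* Dispatch side: no driver tag/resource available, ask for a restart. */
static void example_dispatch_stalled(struct blk_mq_hw_ctx *hctx)
{
	blk_mq_sched_mark_restart_hctx(hctx);
}

/* Completion side: a resource was freed, rerun the queue if requested. */
static void example_resource_freed(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_sched_needs_restart(hctx)) {
		clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
		blk_mq_run_hw_queue(hctx, true);
	}
}

The renamed blk_mq_sched_restart() pulled in by this merge is, as far as the diff shows, the completion-side entry point for this pattern, which is presumably why the separate queue-wide QUEUE_FLAG_RESTART helper removed above is no longer needed.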