author     Christoph Hellwig <hch@lst.de>    2014-04-16 10:48:08 -0600
committer  Jens Axboe <axboe@fb.com>         2014-04-16 14:15:25 -0600
commit     70f4db639c5b2479e08657392cbf3ba3cceea11c
tree       58a5acc839c2d0dd73436fcd6a95c8fe50bbccff /block/blk-mq.c
parent     1b4a325858f695a9b5041313602d34b36f463724
blk-mq: add blk_mq_delay_queue
Add a blk-mq equivalent to blk_delay_queue so that the scsi layer can
ask to be kicked again after a delay.

Signed-off-by: Christoph Hellwig <hch@lst.de>

Modified by me to kill the unnecessary preempt disable/enable
in the delayed workqueue handler.

Signed-off-by: Jens Axboe <axboe@fb.com>
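As a usage sketch only (not part of this commit): a blk-mq driver's ->queue_rq
handler can use the new hook to back off when the hardware is temporarily out
of resources. Everything prefixed my_ below is hypothetical; only
blk_mq_stop_hw_queue(), blk_mq_delay_queue(), and the BLK_MQ_RQ_QUEUE_*
return codes are real API of this era.

#include <linux/blk-mq.h>

struct my_dev;                                          /* hypothetical device */
extern bool my_dev_busy(struct my_dev *dev);            /* hypothetical helpers */
extern void my_dev_issue(struct my_dev *dev, struct request *rq);

static int my_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
        struct my_dev *dev = hctx->queue->queuedata;

        if (my_dev_busy(dev)) {
                /*
                 * Order matters: blk_mq_stop_hw_queue() cancels a pending
                 * delay_work, and blk_mq_delay_work_fn() only reruns the
                 * queue if BLK_MQ_S_STOPPED is still set when it fires.
                 */
                blk_mq_stop_hw_queue(hctx);
                blk_mq_delay_queue(hctx, 3);    /* kick again in ~3 ms */
                return BLK_MQ_RQ_QUEUE_BUSY;
        }

        my_dev_issue(dev, rq);
        return BLK_MQ_RQ_QUEUE_OK;
}

This is the pattern the commit message anticipates for the scsi layer: return
busy, and ask to be kicked again after a delay instead of polling.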
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--  block/blk-mq.c | 45 +++++++++++++++++++++++++++++++++++++++------
1 file changed, 39 insertions(+), 6 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index da3808823e44..0cf52dddfa6b 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -640,7 +640,7 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
         if (!async && cpumask_test_cpu(smp_processor_id(), hctx->cpumask))
                 __blk_mq_run_hw_queue(hctx);
         else if (hctx->queue->nr_hw_queues == 1)
-                kblockd_schedule_delayed_work(&hctx->delayed_work, 0);
+                kblockd_schedule_delayed_work(&hctx->run_work, 0);
         else {
                 unsigned int cpu;
 
@@ -651,7 +651,7 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
                  * just queue on the first CPU.
                  */
                 cpu = cpumask_first(hctx->cpumask);
-                kblockd_schedule_delayed_work_on(cpu, &hctx->delayed_work, 0);
+                kblockd_schedule_delayed_work_on(cpu, &hctx->run_work, 0);
         }
 }
 
@@ -675,7 +675,8 @@ EXPORT_SYMBOL(blk_mq_run_queues);
 
 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
 {
-        cancel_delayed_work(&hctx->delayed_work);
+        cancel_delayed_work(&hctx->run_work);
+        cancel_delayed_work(&hctx->delay_work);
         set_bit(BLK_MQ_S_STOPPED, &hctx->state);
 }
 EXPORT_SYMBOL(blk_mq_stop_hw_queue);
@@ -717,15 +718,46 @@ void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
 }
 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
 
-static void blk_mq_work_fn(struct work_struct *work)
+static void blk_mq_run_work_fn(struct work_struct *work)
 {
         struct blk_mq_hw_ctx *hctx;
 
-        hctx = container_of(work, struct blk_mq_hw_ctx, delayed_work.work);
+        hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
         __blk_mq_run_hw_queue(hctx);
 }
 
+static void blk_mq_delay_work_fn(struct work_struct *work)
+{
+        struct blk_mq_hw_ctx *hctx;
+
+        hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work);
+
+        if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state))
+                __blk_mq_run_hw_queue(hctx);
+}
+
+void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
+{
+        unsigned long tmo = msecs_to_jiffies(msecs);
+
+        if (hctx->queue->nr_hw_queues == 1)
+                kblockd_schedule_delayed_work(&hctx->delay_work, tmo);
+        else {
+                unsigned int cpu;
+
+                /*
+                 * It'd be great if the workqueue API had a way to pass
+                 * in a mask and had some smarts for more clever placement
+                 * than the first CPU. Or we could round-robin here. For now,
+                 * just queue on the first CPU.
+                 */
+                cpu = cpumask_first(hctx->cpumask);
+                kblockd_schedule_delayed_work_on(cpu, &hctx->delay_work, tmo);
+        }
+}
+EXPORT_SYMBOL(blk_mq_delay_queue);
+
 static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
                                     struct request *rq, bool at_head)
 {
@@ -1179,7 +1211,8 @@ static int blk_mq_init_hw_queues(struct request_queue *q,
                 if (node == NUMA_NO_NODE)
                         node = hctx->numa_node = set->numa_node;
 
-                INIT_DELAYED_WORK(&hctx->delayed_work, blk_mq_work_fn);
+                INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
+                INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
                 spin_lock_init(&hctx->lock);
                 INIT_LIST_HEAD(&hctx->dispatch);
                 hctx->queue = q;
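A closing observation, hedged as my reading of the new code rather than
anything the commit states: delay_work is kept separate from run_work so that
an immediate kick and a pending delayed kick do not fight over one timer, and
blk_mq_delay_work_fn() runs the queue only if it can clear BLK_MQ_S_STOPPED,
so a delayed kick on a queue that was restarted in the meantime (e.g. by
blk_mq_start_stopped_hw_queues()) fires as a no-op. A minimal sketch of the
intended stop/delay pairing, with a hypothetical caller:

/* Hypothetical error path: stop dispatching now, retry after ~100 ms. */
static void my_dev_defer_dispatch(struct blk_mq_hw_ctx *hctx)
{
        blk_mq_stop_hw_queue(hctx);     /* sets BLK_MQ_S_STOPPED, cancels both works */
        blk_mq_delay_queue(hctx, 100);  /* blk_mq_delay_work_fn() clears the bit
                                           and reruns the queue when it fires */
}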