author    Jens Axboe <axboe@fb.com>  2017-04-10 09:54:54 -0600
committer Jens Axboe <axboe@fb.com>  2017-04-28 08:10:15 -0600
commit    9f993737906b30d7b2454a38637d1f70ffd60f2f
tree      de9f50b9f8411ad9306796746900195a0b6f45e6 /block
parent    60ae36ad0340b1ba88530d6a5e141455dd3afd81
blk-mq: unify hctx delayed_run_work and run_work
They serve the exact same purpose. Get rid of the non-delayed work variant, and just run it without delay for the normal case.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <Bart.VanAssche@sandisk.com>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
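The pattern in play: a delayed_work queued with a zero delay goes onto the workqueue immediately, so one delayed_work can cover both the immediate and the delayed case. Below is a minimal sketch of that pattern outside blk-mq, with made-up names (my_ctx, my_run_fn, my_init, my_kick); the patch itself schedules through kblockd_schedule_delayed_work_on() and blk_mq_hctx_next_cpu(), not plain schedule_delayed_work().

#include <linux/jiffies.h>
#include <linux/workqueue.h>

/* Illustrative only: my_ctx stands in for struct blk_mq_hw_ctx. */
struct my_ctx {
	struct delayed_work run_work;	/* one field replaces work + delayed work */
};

static void my_run_fn(struct work_struct *work)
{
	/* a delayed_work embeds its work_struct as .work, hence run_work.work */
	struct my_ctx *ctx = container_of(work, struct my_ctx, run_work.work);

	/* ... dispatch requests for ctx ... */
}

static void my_init(struct my_ctx *ctx)
{
	INIT_DELAYED_WORK(&ctx->run_work, my_run_fn);
}

static void my_kick(struct my_ctx *ctx, unsigned long msecs)
{
	/*
	 * msecs_to_jiffies(0) == 0, and a zero delay queues the work at
	 * once, so no "if (msecs == 0)" special case is needed.
	 */
	schedule_delayed_work(&ctx->run_work, msecs_to_jiffies(msecs));
}

The flip side, visible in the hunks below, is that every cancel site must switch to the delayed-work API (cancel_delayed_work()/cancel_delayed_work_sync()), and container_of() in the work function must now go through run_work.work.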
Diffstat (limited to 'block')
-rw-r--r--  block/blk-core.c |  2 +-
-rw-r--r--  block/blk-mq.c   | 27 ++++++---------------------
2 files changed, 7 insertions(+), 22 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 6bd4d1754d29..37939672d4df 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -269,7 +269,7 @@ void blk_sync_queue(struct request_queue *q)
 		int i;
 
 		queue_for_each_hw_ctx(q, hctx, i) {
-			cancel_work_sync(&hctx->run_work);
+			cancel_delayed_work_sync(&hctx->run_work);
 			cancel_delayed_work_sync(&hctx->delay_work);
 		}
 	} else {
diff --git a/block/blk-mq.c b/block/blk-mq.c
index e6aad49c1686..5c68fce87ffc 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1166,13 +1166,9 @@ static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
 		put_cpu();
 	}
 
-	if (msecs == 0)
-		kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx),
-					 &hctx->run_work);
-	else
-		kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
-						 &hctx->delayed_run_work,
-						 msecs_to_jiffies(msecs));
+	kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
+					 &hctx->run_work,
+					 msecs_to_jiffies(msecs));
 }
 
 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
@@ -1224,7 +1220,7 @@ EXPORT_SYMBOL(blk_mq_queue_stopped);
 
 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
 {
-	cancel_work(&hctx->run_work);
+	cancel_delayed_work_sync(&hctx->run_work);
 	cancel_delayed_work(&hctx->delay_work);
 	set_bit(BLK_MQ_S_STOPPED, &hctx->state);
 }
@@ -1282,17 +1278,7 @@ static void blk_mq_run_work_fn(struct work_struct *work)
 {
 	struct blk_mq_hw_ctx *hctx;
 
-	hctx = container_of(work, struct blk_mq_hw_ctx, run_work);
-
-	__blk_mq_run_hw_queue(hctx);
-}
-
-static void blk_mq_delayed_run_work_fn(struct work_struct *work)
-{
-	struct blk_mq_hw_ctx *hctx;
-
-	hctx = container_of(work, struct blk_mq_hw_ctx, delayed_run_work.work);
-
+	hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
 	__blk_mq_run_hw_queue(hctx);
 }
@@ -1898,8 +1884,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
 	if (node == NUMA_NO_NODE)
 		node = hctx->numa_node = set->numa_node;
 
-	INIT_WORK(&hctx->run_work, blk_mq_run_work_fn);
-	INIT_DELAYED_WORK(&hctx->delayed_run_work, blk_mq_delayed_run_work_fn);
+	INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
 	INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
 	spin_lock_init(&hctx->lock);
 	INIT_LIST_HEAD(&hctx->dispatch);