author    Tejun Heo <tj@kernel.org>       2018-01-09 08:29:50 -0800
committer Jens Axboe <axboe@kernel.dk>    2018-01-09 09:31:15 -0700
commit    358f70da49d77c43f2ca11b5da584213b2add29c (patch)
tree      51278948dbff954a8cede7523bf06a38dbb8051a
parent    67818d25738b1c9ffb8541ca875b2ae3304869d5 (diff)
blk-mq: make blk_abort_request() trigger timeout path
With issue/complete and timeout paths now using the generation number and
state based synchronization, blk_abort_request() is the only one which
depends on REQ_ATOM_COMPLETE for arbitrating completion.

There's no reason for blk_abort_request() to be a completely separate
path.  This patch makes blk_abort_request() piggyback on the timeout path
instead of trying to terminate the request directly.

This removes the last dependency on REQ_ATOM_COMPLETE in blk-mq.

Note that this makes blk_abort_request() asynchronous - it initiates
abortion but the actual termination will happen after a short while,
even when the caller owns the request.  AFAICS, SCSI and ATA should be
fine with that and I think mtip32xx and dasd should be safe but not
completely sure.  It'd be great if people who know the drivers take a
look.

v2: - Add comment explaining the lack of synchronization around
      ->deadline update as requested by Bart.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Asai Thambi SP <asamymuthupa@micron.com>
Cc: Stefan Haberland <sth@linux.vnet.ibm.com>
Cc: Jan Hoeppner <hoeppner@linux.vnet.ibm.com>
Cc: Bart Van Assche <Bart.VanAssche@wdc.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
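For reference, once the block/blk-timeout.c hunk below is applied, the
modified function ends up looking roughly as follows (a sketch assembled
from the diff; the closing brace is assumed, as it falls outside the
shown hunk context):

void blk_abort_request(struct request *req)
{
	if (req->q->mq_ops) {
		/*
		 * All we need to ensure is that timeout scan takes place
		 * immediately and that scan sees the new timeout value.
		 * No need for fancy synchronizations.
		 */
		req->deadline = jiffies;
		mod_timer(&req->q->timeout, 0);
	} else {
		if (blk_mark_rq_complete(req))
			return;
		blk_delete_timer(req);
		blk_rq_timed_out(req);
	}
}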
-rw-r--r--  block/blk-mq.c       2
-rw-r--r--  block/blk-mq.h       2
-rw-r--r--  block/blk-timeout.c  13
3 files changed, 10 insertions, 7 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 50dda2ff0d85..90f6910a83f6 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -820,7 +820,7 @@ struct blk_mq_timeout_data {
unsigned int nr_expired;
};
-void blk_mq_rq_timed_out(struct request *req, bool reserved)
+static void blk_mq_rq_timed_out(struct request *req, bool reserved)
{
const struct blk_mq_ops *ops = req->q->mq_ops;
enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;
diff --git a/block/blk-mq.h b/block/blk-mq.h
index cf01f6f8c73d..6b2d61629d48 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -94,8 +94,6 @@ extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
-extern void blk_mq_rq_timed_out(struct request *req, bool reserved);
-
void blk_mq_release(struct request_queue *q);
/**
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index 6427be7ac363..4f04cd1e0b74 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -156,12 +156,17 @@ void blk_timeout_work(struct work_struct *work)
*/
void blk_abort_request(struct request *req)
{
- if (blk_mark_rq_complete(req))
- return;
-
if (req->q->mq_ops) {
- blk_mq_rq_timed_out(req, false);
+ /*
+ * All we need to ensure is that timeout scan takes place
+ * immediately and that scan sees the new timeout value.
+ * No need for fancy synchronizations.
+ */
+ req->deadline = jiffies;
+ mod_timer(&req->q->timeout, 0);
} else {
+ if (blk_mark_rq_complete(req))
+ return;
blk_delete_timer(req);
blk_rq_timed_out(req);
}
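
Since the abort is now funneled through the timeout path,
blk_abort_request() returns before the request is actually terminated.
A hypothetical caller sketch (mydrv_abort_rq() is illustrative, not part
of this patch) showing the expectation this places on drivers such as
mtip32xx or dasd:

#include <linux/blkdev.h>

/*
 * Hypothetical example: after this patch, blk_abort_request() only
 * backdates ->deadline and kicks the queue's timeout timer, so the
 * driver's ->timeout() handler runs later from the timeout work.
 */
static void mydrv_abort_rq(struct request *rq)
{
	blk_abort_request(rq);
	/*
	 * Do not complete or reuse rq here; termination happens
	 * asynchronously once the timeout scan picks it up.
	 */
}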