author		Jianchao Wang <jianchao.w.wang@oracle.com>	2018-12-14 09:28:18 +0800
committer	Jens Axboe <axboe@kernel.dk>			2018-12-16 08:33:57 -0700
commit		7f556a44e61d0b62d78db9a2662a5f0daef010f2 (patch)
tree		e4712b5c0ac800c08863d55fe4c4781d7ffc4f73 /block
parent		4c9770c90fc5b6d6b6d190d108c061015f5804f7 (diff)
blk-mq: refactor the code of issue request directly
Merge blk_mq_try_issue_directly and __blk_mq_try_issue_directly into one interface, so there is a single unified path for issuing requests directly. The merged interface takes over the request completely: it may insert it, end it, or do nothing, depending on the return value of .queue_rq and the 'bypass' parameter. The caller then needs no further handling, and the code can be cleaned up.

In addition, commit c616cbee ("blk-mq: punt failed direct issue to dispatch list") always inserts requests into the hctx dispatch list whenever it gets BLK_STS_RESOURCE or BLK_STS_DEV_RESOURCE. That is overkill and harms merging; it only needs to be done for requests that have actually been through .queue_rq. This patch fixes that as well.

Signed-off-by: Jianchao Wang <jianchao.w.wang@oracle.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
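As a rough sketch of the merged interface's contract, the standalone C model below mirrors the post-issue switch this patch adds. It is not kernel code: the names settle(), seen_queue_rq and the ACT_* actions are invented for illustration, and the stopped/quiesced path is omitted for brevity.

/* Standalone model of the merged helper's take-over contract -- not
 * kernel code; settle(), seen_queue_rq and ACT_* are illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

enum blk_status { BLK_STS_OK, BLK_STS_RESOURCE, BLK_STS_DEV_RESOURCE, BLK_STS_IOERR };
enum action { ACT_NONE, ACT_DISPATCH_LIST_INSERT, ACT_SCHED_INSERT, ACT_END_REQUEST };

/* Mirrors the switch at the tail of the merged blk_mq_try_issue_directly(). */
static enum action settle(enum blk_status ret, bool bypass, bool seen_queue_rq)
{
	switch (ret) {
	case BLK_STS_OK:
		return ACT_NONE;
	case BLK_STS_RESOURCE:
	case BLK_STS_DEV_RESOURCE:
		if (seen_queue_rq)
			/* Been through .queue_rq(): always punt to the hctx
			 * dispatch list, as commit c616cbee requires. */
			return ACT_DISPATCH_LIST_INSERT;
		if (!bypass)
			/* Never reached the driver: a plain scheduler insert
			 * keeps merging intact. */
			return ACT_SCHED_INSERT;
		/* A bypass caller keeps ownership and sees the error. */
		return ACT_NONE;
	default:
		/* Hard error: end the request unless a bypass caller
		 * wants to handle it itself. */
		return bypass ? ACT_NONE : ACT_END_REQUEST;
	}
}

int main(void)
{
	/* Out of budget/tag, non-bypass caller: scheduler insert. */
	printf("%d\n", settle(BLK_STS_RESOURCE, false, false) == ACT_SCHED_INSERT);
	/* Failed inside .queue_rq(): dispatch-list insert, even for bypass. */
	printf("%d\n", settle(BLK_STS_DEV_RESOURCE, true, true) == ACT_DISPATCH_LIST_INSERT);
	return 0;
}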
Diffstat (limited to 'block')
-rw-r--r--	block/blk-mq.c	103
1 file changed, 54 insertions(+), 49 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 9690f4f8de7e..af4dc8227954 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1792,78 +1792,83 @@ static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
return ret;
}
-static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+static blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
struct request *rq,
blk_qc_t *cookie,
- bool bypass_insert, bool last)
+ bool bypass, bool last)
{
struct request_queue *q = rq->q;
bool run_queue = true;
+ blk_status_t ret = BLK_STS_RESOURCE;
+ int srcu_idx;
+ bool force = false;
+ hctx_lock(hctx, &srcu_idx);
/*
- * RCU or SRCU read lock is needed before checking quiesced flag.
+ * hctx_lock is needed before checking quiesced flag.
*
- * When queue is stopped or quiesced, ignore 'bypass_insert' from
- * blk_mq_request_issue_directly(), and return BLK_STS_OK to caller,
- * and avoid driver to try to dispatch again.
+ * When the queue is stopped or quiesced, ignore 'bypass': insert
+ * the request and return BLK_STS_OK to the caller, so the driver
+ * does not try to dispatch again.
*/
- if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
+ if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q))) {
run_queue = false;
- bypass_insert = false;
- goto insert;
+ bypass = false;
+ goto out_unlock;
}
- if (q->elevator && !bypass_insert)
- goto insert;
+ if (unlikely(q->elevator && !bypass))
+ goto out_unlock;
if (!blk_mq_get_dispatch_budget(hctx))
- goto insert;
+ goto out_unlock;
if (!blk_mq_get_driver_tag(rq)) {
blk_mq_put_dispatch_budget(hctx);
- goto insert;
+ goto out_unlock;
}
- return __blk_mq_issue_directly(hctx, rq, cookie, last);
-insert:
- if (bypass_insert)
- return BLK_STS_RESOURCE;
-
- blk_mq_request_bypass_insert(rq, run_queue);
- return BLK_STS_OK;
-}
-
-static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
- struct request *rq, blk_qc_t *cookie)
-{
- blk_status_t ret;
- int srcu_idx;
-
- might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
-
- hctx_lock(hctx, &srcu_idx);
-
- ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true);
- if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
- blk_mq_request_bypass_insert(rq, true);
- else if (ret != BLK_STS_OK)
- blk_mq_end_request(rq, ret);
-
+ /*
+ * Always add a request that has been through
+ * .queue_rq() to the hardware dispatch list.
+ */
+ force = true;
+ ret = __blk_mq_issue_directly(hctx, rq, cookie, last);
+out_unlock:
hctx_unlock(hctx, srcu_idx);
+ switch (ret) {
+ case BLK_STS_OK:
+ break;
+ case BLK_STS_DEV_RESOURCE:
+ case BLK_STS_RESOURCE:
+ if (force) {
+ blk_mq_request_bypass_insert(rq, run_queue);
+ /*
+ * We have to return BLK_STS_OK to DM (the
+ * bypass caller) to avoid a livelock. Otherwise,
+ * return the real result to indicate whether
+ * the request was direct-issued successfully.
+ */
+ ret = bypass ? BLK_STS_OK : ret;
+ } else if (!bypass) {
+ blk_mq_sched_insert_request(rq, false,
+ run_queue, false);
+ }
+ break;
+ default:
+ if (!bypass)
+ blk_mq_end_request(rq, ret);
+ break;
+ }
+
+ return ret;
}
blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
{
- blk_status_t ret;
- int srcu_idx;
- blk_qc_t unused_cookie;
- struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
+ blk_qc_t unused;
- hctx_lock(hctx, &srcu_idx);
- ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true, last);
- hctx_unlock(hctx, srcu_idx);
-
- return ret;
+ return blk_mq_try_issue_directly(rq->mq_hctx, rq, &unused, true, last);
}
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
@@ -2004,13 +2009,13 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
if (same_queue_rq) {
data.hctx = same_queue_rq->mq_hctx;
blk_mq_try_issue_directly(data.hctx, same_queue_rq,
- &cookie);
+ &cookie, false, true);
}
} else if ((q->nr_hw_queues > 1 && is_sync) || (!q->elevator &&
!data.hctx->dispatch_busy)) {
blk_mq_put_ctx(data.ctx);
blk_mq_bio_to_request(rq, bio);
- blk_mq_try_issue_directly(data.hctx, rq, &cookie);
+ blk_mq_try_issue_directly(data.hctx, rq, &cookie, false, true);
} else {
blk_mq_put_ctx(data.ctx);
blk_mq_bio_to_request(rq, bio);
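For completeness, this is how a bypass caller would consume blk_mq_request_issue_directly() under the new contract; a hypothetical sketch only (requeue_later() is an invented placeholder, not a kernel API, and the stopped/quiesced corner case is left out).

/* Hypothetical bypass-side caller, in the spirit of request-based dm.
 * requeue_later() is a placeholder for caller-side requeue handling.
 */
static void issue_one(struct request *rq, bool last)
{
	blk_status_t ret = blk_mq_request_issue_directly(rq, last);

	switch (ret) {
	case BLK_STS_OK:
		/* Accepted by the driver, or already through .queue_rq()
		 * and force-inserted into the hctx dispatch list (reported
		 * as OK so dm does not livelock). */
		break;
	case BLK_STS_RESOURCE:
	case BLK_STS_DEV_RESOURCE:
		/* Never reached .queue_rq() (no budget or no driver tag),
		 * so the helper did not take the request over: the caller
		 * still owns it and can retry later. */
		requeue_later(rq);
		break;
	default:
		/* Hard error from .queue_rq(): complete the request. */
		blk_mq_end_request(rq, ret);
		break;
	}
}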