Diffstat (limited to 'block/blk-flush.c')
-rw-r--r--  block/blk-flush.c | 88
1 file changed, 42 insertions(+), 46 deletions(-)
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 8220517c2d67..43d6152897a4 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -95,26 +95,9 @@ static void blk_kick_flush(struct request_queue *q,
struct blk_flush_queue *fq, blk_opf_t flags);
static inline struct blk_flush_queue *
-blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
+blk_get_flush_queue(struct blk_mq_ctx *ctx)
{
- return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq;
-}
-
-static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
-{
- unsigned int policy = 0;
-
- if (blk_rq_sectors(rq))
- policy |= REQ_FSEQ_DATA;
-
- if (fflags & (1UL << QUEUE_FLAG_WC)) {
- if (rq->cmd_flags & REQ_PREFLUSH)
- policy |= REQ_FSEQ_PREFLUSH;
- if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
- (rq->cmd_flags & REQ_FUA))
- policy |= REQ_FSEQ_POSTFLUSH;
- }
- return policy;
+ return blk_mq_map_queue(REQ_OP_FLUSH, ctx)->fq;
}
static unsigned int blk_flush_cur_seq(struct request *rq)
@@ -130,6 +113,8 @@ static void blk_flush_restore_request(struct request *rq)
* original @rq->bio. Restore it.
*/
rq->bio = rq->biotail;
+ if (rq->bio)
+ rq->__sector = rq->bio->bi_iter.bi_sector;
/* make @rq a normal request */
rq->rq_flags &= ~RQF_FLUSH_SEQ;
@@ -143,7 +128,7 @@ static void blk_account_io_flush(struct request *rq)
part_stat_lock();
part_stat_inc(part, ios[STAT_FLUSH]);
part_stat_add(part, nsecs[STAT_FLUSH],
- ktime_get_ns() - rq->start_time_ns);
+ blk_time_get_ns() - rq->start_time_ns);
part_stat_unlock();
}
@@ -183,13 +168,13 @@ static void blk_flush_complete_seq(struct request *rq,
/* queue for flush */
if (list_empty(pending))
fq->flush_pending_since = jiffies;
- list_move_tail(&rq->flush.list, pending);
+ list_add_tail(&rq->queuelist, pending);
break;
case REQ_FSEQ_DATA:
- list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
+ fq->flush_data_in_flight++;
spin_lock(&q->requeue_lock);
- list_add(&rq->queuelist, &q->requeue_list);
+ list_move(&rq->queuelist, &q->requeue_list);
spin_unlock(&q->requeue_lock);
blk_mq_kick_requeue_list(q);
break;
@@ -201,7 +186,7 @@ static void blk_flush_complete_seq(struct request *rq,
* flush data request completion path. Restore @rq for
* normal completion and end it.
*/
- list_del_init(&rq->flush.list);
+ list_del_init(&rq->queuelist);
blk_flush_restore_request(rq);
blk_mq_end_request(rq, error);
break;
@@ -220,7 +205,7 @@ static enum rq_end_io_ret flush_end_io(struct request *flush_rq,
struct list_head *running;
struct request *rq, *n;
unsigned long flags = 0;
- struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);
+ struct blk_flush_queue *fq = blk_get_flush_queue(flush_rq->mq_ctx);
/* release the tag's ownership to the req cloned from */
spin_lock_irqsave(&fq->mq_flush_lock, flags);
@@ -257,10 +242,11 @@ static enum rq_end_io_ret flush_end_io(struct request *flush_rq,
fq->flush_running_idx ^= 1;
/* and push the waiting requests to the next stage */
- list_for_each_entry_safe(rq, n, running, flush.list) {
+ list_for_each_entry_safe(rq, n, running, queuelist) {
unsigned int seq = blk_flush_cur_seq(rq);
BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
+ list_del_init(&rq->queuelist);
blk_flush_complete_seq(rq, fq, seq, error);
}
@@ -291,7 +277,7 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
{
struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
struct request *first_rq =
- list_first_entry(pending, struct request, flush.list);
+ list_first_entry(pending, struct request, queuelist);
struct request *flush_rq = fq->flush_rq;
/* C1 described at the top of this file */
@@ -299,7 +285,7 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
return;
/* C2 and C3 */
- if (!list_empty(&fq->flush_data_in_flight) &&
+ if (fq->flush_data_in_flight &&
time_before(jiffies,
fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
return;
@@ -323,16 +309,9 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
flush_rq->mq_ctx = first_rq->mq_ctx;
flush_rq->mq_hctx = first_rq->mq_hctx;
- if (!q->elevator) {
+ if (!q->elevator)
flush_rq->tag = first_rq->tag;
-
- /*
- * We borrow data request's driver tag, so have to mark
- * this flush request as INFLIGHT for avoiding double
- * account of this driver tag
- */
- flush_rq->rq_flags |= RQF_MQ_INFLIGHT;
- } else
+ else
flush_rq->internal_tag = first_rq->internal_tag;
flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
@@ -362,7 +341,7 @@ static enum rq_end_io_ret mq_flush_data_end_io(struct request *rq,
struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
struct blk_mq_ctx *ctx = rq->mq_ctx;
unsigned long flags;
- struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);
+ struct blk_flush_queue *fq = blk_get_flush_queue(ctx);
if (q->elevator) {
WARN_ON(rq->tag < 0);
@@ -374,6 +353,12 @@ static enum rq_end_io_ret mq_flush_data_end_io(struct request *rq,
* the comment in flush_end_io().
*/
spin_lock_irqsave(&fq->mq_flush_lock, flags);
+ fq->flush_data_in_flight--;
+ /*
+ * May have been corrupted by rq->rq_next reuse, we need to
+ * re-initialize rq->queuelist before reusing it here.
+ */
+ INIT_LIST_HEAD(&rq->queuelist);
blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
@@ -384,7 +369,6 @@ static enum rq_end_io_ret mq_flush_data_end_io(struct request *rq,
static void blk_rq_init_flush(struct request *rq)
{
rq->flush.seq = 0;
- INIT_LIST_HEAD(&rq->flush.list);
rq->rq_flags |= RQF_FLUSH_SEQ;
rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
rq->end_io = mq_flush_data_end_io;
@@ -398,19 +382,32 @@ static void blk_rq_init_flush(struct request *rq)
bool blk_insert_flush(struct request *rq)
{
struct request_queue *q = rq->q;
- unsigned long fflags = q->queue_flags; /* may change, cache */
- unsigned int policy = blk_flush_policy(fflags, rq);
- struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);
+ struct blk_flush_queue *fq = blk_get_flush_queue(rq->mq_ctx);
+ bool supports_fua = q->limits.features & BLK_FEAT_FUA;
+ unsigned int policy = 0;
/* FLUSH/FUA request must never be merged */
WARN_ON_ONCE(rq->bio != rq->biotail);
+ if (blk_rq_sectors(rq))
+ policy |= REQ_FSEQ_DATA;
+
+ /*
+ * Check which flushes we need to sequence for this operation.
+ */
+ if (blk_queue_write_cache(q)) {
+ if (rq->cmd_flags & REQ_PREFLUSH)
+ policy |= REQ_FSEQ_PREFLUSH;
+ if ((rq->cmd_flags & REQ_FUA) && !supports_fua)
+ policy |= REQ_FSEQ_POSTFLUSH;
+ }
+
/*
* @policy now records what operations need to be done. Adjust
* REQ_PREFLUSH and FUA for the driver.
*/
rq->cmd_flags &= ~REQ_PREFLUSH;
- if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
+ if (!supports_fua)
rq->cmd_flags &= ~REQ_FUA;
/*
@@ -443,9 +440,9 @@ bool blk_insert_flush(struct request *rq)
* the post flush, and then just pass the command on.
*/
blk_rq_init_flush(rq);
- rq->flush.seq |= REQ_FSEQ_POSTFLUSH;
+ rq->flush.seq |= REQ_FSEQ_PREFLUSH;
spin_lock_irq(&fq->mq_flush_lock);
- list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
+ fq->flush_data_in_flight++;
spin_unlock_irq(&fq->mq_flush_lock);
return false;
default:
@@ -496,7 +493,6 @@ struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
INIT_LIST_HEAD(&fq->flush_queue[0]);
INIT_LIST_HEAD(&fq->flush_queue[1]);
- INIT_LIST_HEAD(&fq->flush_data_in_flight);
return fq;