path: root/block/blk-flush.c
author    Jens Axboe <axboe@kernel.dk>    2021-10-14 14:39:59 -0600
committer Jens Axboe <axboe@kernel.dk>    2021-12-03 14:51:29 -0700
commit    0a467d0fdd9594fbb449ebc93852533332c528fd (patch)
tree      fd368b2ce4f835da5d78e1bbd381b38aa98fd078 /block/blk-flush.c
parent    ceaa762527f41a431b552bc000de4b626d2d8cb7 (diff)
block: switch to atomic_t for request references
refcount_t is not as expensive as it used to be, but it's still more
expensive than the io_uring method of using atomic_t and just checking
for potential over/underflow.

This borrows that same implementation, which in turn is based on the
mm implementation from Linus.

Reviewed-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
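The req_ref_*() helpers themselves live outside this file, so they do not
appear in the diff below (the diffstat is limited to block/blk-flush.c).
A minimal sketch of what they look like, assuming they sit in block/blk-mq.h
and mirror the io_uring/mm pattern of pairing plain atomic_t operations with
a warning when the count is zero or suspiciously close to overflow; the names
other than the two used below, and the 127u sanity window, are assumptions,
not taken from this diff:

/*
 * Sketch of the atomic_t-based request reference helpers (assumed to be
 * added to block/blk-mq.h by the wider series). The check warns if the
 * count is zero or within a small window below zero when read as unsigned,
 * i.e. a likely underflow/overflow, instead of paying refcount_t's cost
 * on every operation.
 */
#define req_ref_zero_or_close_to_overflow(req)	\
	((unsigned int) atomic_read(&(req)->ref) + 127u <= 127u)

static inline bool req_ref_inc_not_zero(struct request *req)
{
	return atomic_inc_not_zero(&req->ref);
}

static inline bool req_ref_put_and_test(struct request *req)
{
	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
	return atomic_dec_and_test(&req->ref);
}

static inline void req_ref_set(struct request *req, int value)
{
	atomic_set(&req->ref, value);
}

With request->ref switched to atomic_t, req_ref_put_and_test() stands in for
refcount_dec_and_test() and req_ref_set() for refcount_set() in the two hunks
below.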
Diffstat (limited to 'block/blk-flush.c')
-rw-r--r--	block/blk-flush.c	4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/block/blk-flush.c b/block/blk-flush.c
index f78bb39e589e..e4df894189ce 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -229,7 +229,7 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
 	/* release the tag's ownership to the req cloned from */
 	spin_lock_irqsave(&fq->mq_flush_lock, flags);
-	if (!refcount_dec_and_test(&flush_rq->ref)) {
+	if (!req_ref_put_and_test(flush_rq)) {
 		fq->rq_status = error;
 		spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
 		return;
@@ -349,7 +349,7 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
 	 * and READ flush_rq->end_io
 	 */
 	smp_wmb();
-	refcount_set(&flush_rq->ref, 1);
+	req_ref_set(flush_rq, 1);
 	blk_flush_queue_rq(flush_rq, false);
 }