author		Christoph Hellwig <hch@lst.de>	2023-07-24 09:54:26 -0700
committer	Jens Axboe <axboe@kernel.dk>	2023-07-24 19:55:16 -0600
commit		cd1d83e24e689f25de7e34bea697971750138d5f (patch)
tree		788fef148baffc1e83b709ac030addd27ce63495 /block/bio.c
parent		8f63fef5867fb5e8c29d9c14b6d739bfc1869d32 (diff)
block: tidy up the bio full checks in bio_add_hw_page
bio_add_hw_page already checks whether the number of bytes being added fits into the max_hw_sectors limit of the queue. Remove the call to bio_full and instead check the bio's segment count against the smaller of the bio's vector capacity and the queue max segments limit, and do this cheap check before the more expensive gap-to-previous check.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jinyoung Choi <j-young.choi@samsung.com>
Link: https://lore.kernel.org/r/20230724165433.117645-2-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/bio.c')
-rw-r--r--	block/bio.c	10
1 file changed, 4 insertions(+), 6 deletions(-)
diff --git a/block/bio.c b/block/bio.c
index 8672179213b9..72488ecea47a 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1014,6 +1014,10 @@ int bio_add_hw_page(struct request_queue *q, struct bio *bio,
 		if (bio_try_merge_hw_seg(q, bio, page, len, offset, same_page))
 			return len;
 
+		if (bio->bi_vcnt >=
+		    min(bio->bi_max_vecs, queue_max_segments(q)))
+			return 0;
+
 		/*
 		 * If the queue doesn't support SG gaps and adding this segment
 		 * would create a gap, disallow it.
@@ -1023,12 +1027,6 @@ int bio_add_hw_page(struct request_queue *q, struct bio *bio,
 			return 0;
 	}
 
-	if (bio_full(bio, len))
-		return 0;
-
-	if (bio->bi_vcnt >= queue_max_segments(q))
-		return 0;
-
 	bvec_set_page(&bio->bi_io_vec[bio->bi_vcnt], page, len, offset);
 	bio->bi_vcnt++;
 	bio->bi_iter.bi_size += len;
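To make the combined limit check easier to see in isolation, here is a minimal, self-contained userspace sketch (not kernel code) of the pattern the patch introduces: the bio is treated as full once its segment count reaches the smaller of its own vector capacity and the queue's segment limit. The struct, field, and helper names below are illustrative stand-ins mirroring the struct bio and request_queue fields referenced in the diff.

/*
 * Userspace sketch of the check added by this patch.  fake_bio and the
 * queue_max_segments() stand-in are hypothetical; they only mirror the
 * names used in the kernel diff above.
 */
#include <stdbool.h>
#include <stdio.h>

#define min(a, b) ((a) < (b) ? (a) : (b))

struct fake_bio {
	unsigned short bi_vcnt;     /* segments currently in the bio */
	unsigned short bi_max_vecs; /* capacity of bi_io_vec[] */
};

/* stand-in for queue_max_segments(q): the queue's segment limit */
static unsigned int queue_max_segments(unsigned int limit)
{
	return limit;
}

/* true if no further hardware segment may be appended */
static bool hw_segs_exhausted(const struct fake_bio *bio, unsigned int q_limit)
{
	return bio->bi_vcnt >= min((unsigned int)bio->bi_max_vecs,
				   queue_max_segments(q_limit));
}

int main(void)
{
	struct fake_bio bio = { .bi_vcnt = 128, .bi_max_vecs = 256 };

	/* queue allows only 128 segments: the combined check trips */
	printf("full: %d\n", hw_segs_exhausted(&bio, 128));
	/* queue allows 256 segments: another segment still fits */
	printf("full: %d\n", hw_segs_exhausted(&bio, 256));
	return 0;
}

The single comparison replaces the two separate bio_full() and queue_max_segments() tests removed by the diff, and because it is a plain integer compare it can run before the costlier bvec_gap_to_prev() check, which is the ordering the commit message describes.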