Diffstat (limited to 'block/blk-lib.c')
-rw-r--r-- | block/blk-lib.c | 285
1 file changed, 157 insertions, 128 deletions
diff --git a/block/blk-lib.c b/block/blk-lib.c
index a6954eafb8c8..4c9f20a689f7 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -35,51 +35,39 @@ static sector_t bio_discard_limit(struct block_device *bdev, sector_t sector)
 	return round_down(UINT_MAX, discard_granularity) >> SECTOR_SHIFT;
 }
 
-int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
-		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop)
+struct bio *blk_alloc_discard_bio(struct block_device *bdev,
+		sector_t *sector, sector_t *nr_sects, gfp_t gfp_mask)
 {
-	struct bio *bio = *biop;
-	sector_t bs_mask;
-
-	if (bdev_read_only(bdev))
-		return -EPERM;
-	if (!bdev_max_discard_sectors(bdev))
-		return -EOPNOTSUPP;
-
-	/* In case the discard granularity isn't set by buggy device driver */
-	if (WARN_ON_ONCE(!bdev_discard_granularity(bdev))) {
-		pr_err_ratelimited("%pg: Error: discard_granularity is 0.\n",
-				   bdev);
-		return -EOPNOTSUPP;
-	}
-
-	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
-	if ((sector | nr_sects) & bs_mask)
-		return -EINVAL;
+	sector_t bio_sects = min(*nr_sects, bio_discard_limit(bdev, *sector));
+	struct bio *bio;
 
-	if (!nr_sects)
-		return -EINVAL;
+	if (!bio_sects)
+		return NULL;
 
-	while (nr_sects) {
-		sector_t req_sects =
-			min(nr_sects, bio_discard_limit(bdev, sector));
+	bio = bio_alloc(bdev, 0, REQ_OP_DISCARD, gfp_mask);
+	if (!bio)
+		return NULL;
+	bio->bi_iter.bi_sector = *sector;
+	bio->bi_iter.bi_size = bio_sects << SECTOR_SHIFT;
+	*sector += bio_sects;
+	*nr_sects -= bio_sects;
+	/*
+	 * We can loop for a long time in here if someone does full device
+	 * discards (like mkfs). Be nice and allow us to schedule out to avoid
+	 * softlocking if preempt is disabled.
+	 */
+	cond_resched();
+	return bio;
+}
 
-		bio = blk_next_bio(bio, bdev, 0, REQ_OP_DISCARD, gfp_mask);
-		bio->bi_iter.bi_sector = sector;
-		bio->bi_iter.bi_size = req_sects << 9;
-		sector += req_sects;
-		nr_sects -= req_sects;
-
-		/*
-		 * We can loop for a long time in here, if someone does
-		 * full device discards (like mkfs). Be nice and allow
-		 * us to schedule out to avoid softlocking if preempt
-		 * is disabled.
-		 */
-		cond_resched();
-	}
+int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop)
+{
+	struct bio *bio;
 
-	*biop = bio;
+	while ((bio = blk_alloc_discard_bio(bdev, &sector, &nr_sects,
+			gfp_mask)))
+		*biop = bio_chain_and_submit(*biop, bio);
 	return 0;
 }
 EXPORT_SYMBOL(__blkdev_issue_discard);
@@ -115,38 +103,80 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 }
 EXPORT_SYMBOL(blkdev_issue_discard);
 
-static int __blkdev_issue_write_zeroes(struct block_device *bdev,
-		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
-		struct bio **biop, unsigned flags)
+static sector_t bio_write_zeroes_limit(struct block_device *bdev)
 {
-	struct bio *bio = *biop;
-	unsigned int max_sectors;
-
-	if (bdev_read_only(bdev))
-		return -EPERM;
+	sector_t bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
 
-	/* Ensure that max_sectors doesn't overflow bi_size */
-	max_sectors = bdev_write_zeroes_sectors(bdev);
+	return min(bdev_write_zeroes_sectors(bdev),
+		(UINT_MAX >> SECTOR_SHIFT) & ~bs_mask);
+}
 
-	if (max_sectors == 0)
-		return -EOPNOTSUPP;
+/*
+ * There is no reliable way for the SCSI subsystem to determine whether a
+ * device supports a WRITE SAME operation without actually performing a write
+ * to media. As a result, write_zeroes is enabled by default and will be
+ * disabled if a zeroing operation subsequently fails. This means that this
+ * queue limit is likely to change at runtime.
+ */
+static void __blkdev_issue_write_zeroes(struct block_device *bdev,
+		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
+		struct bio **biop, unsigned flags, sector_t limit)
+{
 	while (nr_sects) {
-		unsigned int len = min_t(sector_t, nr_sects, max_sectors);
+		unsigned int len = min(nr_sects, limit);
+		struct bio *bio;
+
+		if ((flags & BLKDEV_ZERO_KILLABLE) &&
+		    fatal_signal_pending(current))
+			break;
 
-		bio = blk_next_bio(bio, bdev, 0, REQ_OP_WRITE_ZEROES, gfp_mask);
+		bio = bio_alloc(bdev, 0, REQ_OP_WRITE_ZEROES, gfp_mask);
 		bio->bi_iter.bi_sector = sector;
 		if (flags & BLKDEV_ZERO_NOUNMAP)
 			bio->bi_opf |= REQ_NOUNMAP;
 
 		bio->bi_iter.bi_size = len << SECTOR_SHIFT;
+		*biop = bio_chain_and_submit(*biop, bio);
+
 		nr_sects -= len;
 		sector += len;
 		cond_resched();
 	}
+}
 
-	*biop = bio;
-	return 0;
+static int blkdev_issue_write_zeroes(struct block_device *bdev, sector_t sector,
+		sector_t nr_sects, gfp_t gfp, unsigned flags)
+{
+	sector_t limit = bio_write_zeroes_limit(bdev);
+	struct bio *bio = NULL;
+	struct blk_plug plug;
+	int ret = 0;
+
+	blk_start_plug(&plug);
+	__blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp, &bio,
+			flags, limit);
+	if (bio) {
+		if ((flags & BLKDEV_ZERO_KILLABLE) &&
+		    fatal_signal_pending(current)) {
+			bio_await_chain(bio);
+			blk_finish_plug(&plug);
+			return -EINTR;
+		}
+		ret = submit_bio_wait(bio);
+		bio_put(bio);
+	}
+	blk_finish_plug(&plug);
+
+	/*
+	 * For some devices there is no non-destructive way to verify whether
+	 * WRITE ZEROES is actually supported. These will clear the capability
+	 * on an I/O error, in which case we'll turn any error into
+	 * "not supported" here.
+	 */
+	if (ret && !bdev_write_zeroes_sectors(bdev))
+		return -EOPNOTSUPP;
+	return ret;
 }
 
 /*
@@ -162,35 +192,63 @@ static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
 	return min(pages, (sector_t)BIO_MAX_VECS);
 }
 
-static int __blkdev_issue_zero_pages(struct block_device *bdev,
+static void __blkdev_issue_zero_pages(struct block_device *bdev,
 		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
-		struct bio **biop)
+		struct bio **biop, unsigned int flags)
 {
-	struct bio *bio = *biop;
-	int bi_size = 0;
-	unsigned int sz;
-
-	if (bdev_read_only(bdev))
-		return -EPERM;
+	while (nr_sects) {
+		unsigned int nr_vecs = __blkdev_sectors_to_bio_pages(nr_sects);
+		struct bio *bio;
 
-	while (nr_sects != 0) {
-		bio = blk_next_bio(bio, bdev, __blkdev_sectors_to_bio_pages(nr_sects),
-				   REQ_OP_WRITE, gfp_mask);
+		bio = bio_alloc(bdev, nr_vecs, REQ_OP_WRITE, gfp_mask);
 		bio->bi_iter.bi_sector = sector;
 
-		while (nr_sects != 0) {
-			sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
-			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
-			nr_sects -= bi_size >> 9;
-			sector += bi_size >> 9;
-			if (bi_size < sz)
+		if ((flags & BLKDEV_ZERO_KILLABLE) &&
+		    fatal_signal_pending(current))
+			break;
+
+		do {
+			unsigned int len, added;
+
+			len = min_t(sector_t,
+				PAGE_SIZE, nr_sects << SECTOR_SHIFT);
+			added = bio_add_page(bio, ZERO_PAGE(0), len, 0);
+			if (added < len)
 				break;
-		}
+			nr_sects -= added >> SECTOR_SHIFT;
+			sector += added >> SECTOR_SHIFT;
+		} while (nr_sects);
+
+		*biop = bio_chain_and_submit(*biop, bio);
 		cond_resched();
 	}
+}
 
-	*biop = bio;
-	return 0;
+static int blkdev_issue_zero_pages(struct block_device *bdev, sector_t sector,
+		sector_t nr_sects, gfp_t gfp, unsigned flags)
+{
+	struct bio *bio = NULL;
+	struct blk_plug plug;
+	int ret = 0;
+
+	if (flags & BLKDEV_ZERO_NOFALLBACK)
+		return -EOPNOTSUPP;
+
+	blk_start_plug(&plug);
+	__blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp, &bio, flags);
+	if (bio) {
+		if ((flags & BLKDEV_ZERO_KILLABLE) &&
+		    fatal_signal_pending(current)) {
+			bio_await_chain(bio);
+			blk_finish_plug(&plug);
+			return -EINTR;
		}
+		ret = submit_bio_wait(bio);
+		bio_put(bio);
+	}
+	blk_finish_plug(&plug);
+
+	return ret;
 }
 
 /**
@@ -216,20 +274,21 @@ int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
 		unsigned flags)
 {
-	int ret;
-	sector_t bs_mask;
+	sector_t limit = bio_write_zeroes_limit(bdev);
 
-	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
-	if ((sector | nr_sects) & bs_mask)
-		return -EINVAL;
-
-	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
-			biop, flags);
-	if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
-		return ret;
+	if (bdev_read_only(bdev))
+		return -EPERM;
 
-	return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
-			biop);
+	if (limit) {
+		__blkdev_issue_write_zeroes(bdev, sector, nr_sects,
+				gfp_mask, biop, flags, limit);
+	} else {
+		if (flags & BLKDEV_ZERO_NOFALLBACK)
+			return -EOPNOTSUPP;
+		__blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
+				biop, flags);
+	}
+	return 0;
 }
 EXPORT_SYMBOL(__blkdev_issue_zeroout);
 
@@ -249,51 +308,21 @@ EXPORT_SYMBOL(__blkdev_issue_zeroout);
 int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
 {
-	int ret = 0;
-	sector_t bs_mask;
-	struct bio *bio;
-	struct blk_plug plug;
-	bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);
+	int ret;
 
-	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
-	if ((sector | nr_sects) & bs_mask)
+	if ((sector | nr_sects) & ((bdev_logical_block_size(bdev) >> 9) - 1))
 		return -EINVAL;
+	if (bdev_read_only(bdev))
+		return -EPERM;
 
-retry:
-	bio = NULL;
-	blk_start_plug(&plug);
-	if (try_write_zeroes) {
-		ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
-				gfp_mask, &bio, flags);
-	} else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
-		ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
-				gfp_mask, &bio);
-	} else {
-		/* No zeroing offload support */
-		ret = -EOPNOTSUPP;
-	}
-	if (ret == 0 && bio) {
-		ret = submit_bio_wait(bio);
-		bio_put(bio);
-	}
-	blk_finish_plug(&plug);
-	if (ret && try_write_zeroes) {
-		if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
-			try_write_zeroes = false;
-			goto retry;
-		}
-		if (!bdev_write_zeroes_sectors(bdev)) {
-			/*
-			 * Zeroing offload support was indicated, but the
-			 * device reported ILLEGAL REQUEST (for some devices
-			 * there is no non-destructive way to verify whether
-			 * WRITE ZEROES is actually supported).
-			 */
-			ret = -EOPNOTSUPP;
-		}
+	if (bdev_write_zeroes_sectors(bdev)) {
+		ret = blkdev_issue_write_zeroes(bdev, sector, nr_sects,
+				gfp_mask, flags);
+		if (ret != -EOPNOTSUPP)
+			return ret;
 	}
-	return ret;
+	return blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask, flags);
 }
 EXPORT_SYMBOL(blkdev_issue_zeroout);
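
After this change, __blkdev_issue_discard(), __blkdev_issue_write_zeroes() and __blkdev_issue_zero_pages() only build and submit a chain of bios via bio_chain_and_submit(), leaving the final bio in *biop for the caller to wait on. Below is a minimal caller sketch of that build-then-wait pattern for discards; it mirrors the blkdev_issue_write_zeroes() wrapper added in this diff, but the function name example_discard_and_wait() is purely illustrative and not part of the patch.

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Illustrative only: synchronously discard a range via the chained-bio API. */
static int example_discard_and_wait(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask)
{
	struct bio *bio = NULL;		/* tail of the chain built so far */
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	/* Allocates one bio per bio_discard_limit() chunk and chains them. */
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, &bio);
	if (!ret && bio) {
		/* Waiting on the last bio waits for the whole chain. */
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	return ret;
}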